clang/lib/CodeGen/CGBuiltin.cpp
1 //===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Builtin calls as LLVM code.
11 //===----------------------------------------------------------------------===//
13 #include "ABIInfo.h"
14 #include "CGCUDARuntime.h"
15 #include "CGCXXABI.h"
16 #include "CGObjCRuntime.h"
17 #include "CGOpenCLRuntime.h"
18 #include "CGRecordLayout.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "ConstantEmitter.h"
22 #include "PatternInit.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/OSLog.h"
28 #include "clang/Basic/TargetBuiltins.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/Basic/TargetOptions.h"
31 #include "clang/CodeGen/CGFunctionInfo.h"
32 #include "clang/Frontend/FrontendDiagnostic.h"
33 #include "llvm/ADT/APFloat.h"
34 #include "llvm/ADT/APInt.h"
35 #include "llvm/ADT/FloatingPointMode.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/StringExtras.h"
38 #include "llvm/Analysis/ValueTracking.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/InlineAsm.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/IntrinsicsAArch64.h"
43 #include "llvm/IR/IntrinsicsAMDGPU.h"
44 #include "llvm/IR/IntrinsicsARM.h"
45 #include "llvm/IR/IntrinsicsBPF.h"
46 #include "llvm/IR/IntrinsicsHexagon.h"
47 #include "llvm/IR/IntrinsicsNVPTX.h"
48 #include "llvm/IR/IntrinsicsPowerPC.h"
49 #include "llvm/IR/IntrinsicsR600.h"
50 #include "llvm/IR/IntrinsicsRISCV.h"
51 #include "llvm/IR/IntrinsicsS390.h"
52 #include "llvm/IR/IntrinsicsVE.h"
53 #include "llvm/IR/IntrinsicsWebAssembly.h"
54 #include "llvm/IR/IntrinsicsX86.h"
55 #include "llvm/IR/MDBuilder.h"
56 #include "llvm/IR/MatrixBuilder.h"
57 #include "llvm/Support/ConvertUTF.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/ScopedPrinter.h"
60 #include "llvm/TargetParser/AArch64TargetParser.h"
61 #include "llvm/TargetParser/X86TargetParser.h"
62 #include <optional>
63 #include <sstream>
65 using namespace clang;
66 using namespace CodeGen;
67 using namespace llvm;
69 static llvm::cl::opt<bool> ClSanitizeAlignmentBuiltin(
70 "sanitize-alignment-builtin", llvm::cl::Hidden,
71 llvm::cl::desc("Instrument builtin functions for -fsanitize=alignment"),
72 llvm::cl::init(true));
74 static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
75 Align AlignmentInBytes) {
76 ConstantInt *Byte;
77 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
78 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
79 // Nothing to initialize.
80 return;
81 case LangOptions::TrivialAutoVarInitKind::Zero:
82 Byte = CGF.Builder.getInt8(0x00);
83 break;
84 case LangOptions::TrivialAutoVarInitKind::Pattern: {
85 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
86 Byte = llvm::dyn_cast<llvm::ConstantInt>(
87 initializationPatternFor(CGF.CGM, Int8));
88 break;
91 if (CGF.CGM.stopAutoInit())
92 return;
93 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
94 I->addAnnotationMetadata("auto-init");
97 /// getBuiltinLibFunction - Given a builtin id for a function like
98 /// "__builtin_fabsf", return a Function* for "fabsf".
99 llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
100 unsigned BuiltinID) {
101 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
103 // Get the name, skip over the __builtin_ prefix (if necessary).
104 StringRef Name;
105 GlobalDecl D(FD);
107 // TODO: This list should be expanded or refactored after all GCC-compatible
108 // std libcall builtins are implemented.
109 static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
110 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
111 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
112 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
113 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
114 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
115 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
116 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
117 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
118 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
119 {Builtin::BI__builtin_printf, "__printfieee128"},
120 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
121 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
122 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
123 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
124 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
125 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
126 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
127 {Builtin::BI__builtin_scanf, "__scanfieee128"},
128 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
129 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
130 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
131 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
132 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
135 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
136 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
137 // when 'long double' uses the 64-bit IEEE 'double' format.
138 static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
139 {Builtin::BI__builtin_frexpl, "frexp"},
140 {Builtin::BI__builtin_ldexpl, "ldexp"},
141 {Builtin::BI__builtin_modfl, "modf"},
144 // If the builtin has been declared explicitly with an assembler label,
145 // use the mangled name. This differs from the plain label on platforms
146 // that prefix labels.
147 if (FD->hasAttr<AsmLabelAttr>())
148 Name = getMangledName(D);
149 else {
150 // TODO: This mutation should also be applied to targets other than PPC,
151 // once the backend supports IEEE 128-bit style libcalls.
152 if (getTriple().isPPC64() &&
153 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
154 F128Builtins.contains(BuiltinID))
155 Name = F128Builtins[BuiltinID];
156 else if (getTriple().isOSAIX() &&
157 &getTarget().getLongDoubleFormat() ==
158 &llvm::APFloat::IEEEdouble() &&
159 AIXLongDouble64Builtins.contains(BuiltinID))
160 Name = AIXLongDouble64Builtins[BuiltinID];
161 else
162 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
165 llvm::FunctionType *Ty =
166 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
168 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
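// Illustrative sketch of the name selection above: for a plain libcall
// builtin the "__builtin_" prefix (10 characters) is stripped, so roughly
//   getBuiltinLibFunction(FD, Builtin::BI__builtin_fabsf)  -> "fabsf"
// while on a PPC64 target whose 'long double' is IEEE quad, the F128Builtins
// table redirects e.g. __builtin_printf to "__printfieee128" instead.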
171 /// Emit the conversions required to turn the given value into an
172 /// integer of the given size.
173 static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
174 QualType T, llvm::IntegerType *IntType) {
175 V = CGF.EmitToMemory(V, T);
177 if (V->getType()->isPointerTy())
178 return CGF.Builder.CreatePtrToInt(V, IntType);
180 assert(V->getType() == IntType);
181 return V;
184 static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
185 QualType T, llvm::Type *ResultType) {
186 V = CGF.EmitFromMemory(V, T);
188 if (ResultType->isPointerTy())
189 return CGF.Builder.CreateIntToPtr(V, ResultType);
191 assert(V->getType() == ResultType);
192 return V;
195 static llvm::Value *CheckAtomicAlignment(CodeGenFunction &CGF,
196 const CallExpr *E) {
197 ASTContext &Ctx = CGF.getContext();
198 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
199 unsigned Bytes = Ptr.getElementType()->isPointerTy()
200 ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
201 : Ptr.getElementType()->getScalarSizeInBits() / 8;
202 unsigned Align = Ptr.getAlignment().getQuantity();
203 if (Align % Bytes != 0) {
204 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
205 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
207 return Ptr.getPointer();
210 /// Utility to insert an atomic instruction based on Intrinsic::ID
211 /// and the expression node.
212 static Value *MakeBinaryAtomicValue(
213 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
214 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
216 QualType T = E->getType();
217 assert(E->getArg(0)->getType()->isPointerType());
218 assert(CGF.getContext().hasSameUnqualifiedType(T,
219 E->getArg(0)->getType()->getPointeeType()));
220 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
222 llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
224 llvm::IntegerType *IntType = llvm::IntegerType::get(
225 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
227 llvm::Value *Args[2];
228 Args[0] = DestPtr;
229 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
230 llvm::Type *ValueType = Args[1]->getType();
231 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
233 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
234 Kind, Args[0], Args[1], Ordering);
235 return EmitFromInt(CGF, Result, T, ValueType);
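// Illustrative sketch: for Kind == llvm::AtomicRMWInst::Add on an 'int'
// operand, the helper above emits roughly
//   %old = atomicrmw add ptr %dst, i32 %val seq_cst
// and hands %old back to the caller converted to the source value type via
// EmitFromInt.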
238 static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
239 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
240 Value *Address = CGF.EmitScalarExpr(E->getArg(1));
242 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
243 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getArg(0)->getType());
244 LV.setNontemporal(true);
245 CGF.EmitStoreOfScalar(Val, LV, false);
246 return nullptr;
249 static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
250 Value *Address = CGF.EmitScalarExpr(E->getArg(0));
252 LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
253 LV.setNontemporal(true);
254 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
257 static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
258 llvm::AtomicRMWInst::BinOp Kind,
259 const CallExpr *E) {
260 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
263 /// Utility to insert an atomic instruction based on Intrinsic::ID and
264 /// the expression node, where the return value is the result of the
265 /// operation.
266 static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
267 llvm::AtomicRMWInst::BinOp Kind,
268 const CallExpr *E,
269 Instruction::BinaryOps Op,
270 bool Invert = false) {
271 QualType T = E->getType();
272 assert(E->getArg(0)->getType()->isPointerType());
273 assert(CGF.getContext().hasSameUnqualifiedType(T,
274 E->getArg(0)->getType()->getPointeeType()));
275 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
277 llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
279 llvm::IntegerType *IntType = llvm::IntegerType::get(
280 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
282 llvm::Value *Args[2];
283 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
284 llvm::Type *ValueType = Args[1]->getType();
285 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
286 Args[0] = DestPtr;
288 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
289 Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
290 Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
291 if (Invert)
292 Result =
293 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
294 llvm::ConstantInt::getAllOnesValue(IntType));
295 Result = EmitFromInt(CGF, Result, T, ValueType);
296 return RValue::get(Result);
299 /// Utility to insert an atomic cmpxchg instruction.
301 /// @param CGF The current codegen function.
302 /// @param E Builtin call expression to convert to cmpxchg.
303 /// arg0 - address to operate on
304 /// arg1 - value to compare with
305 /// arg2 - new value
306 /// @param ReturnBool Specifies whether to return success flag of
307 /// cmpxchg result or the old value.
309 /// @returns result of cmpxchg, according to ReturnBool
311 /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics,
312 /// invoke the function EmitAtomicCmpXchgForMSIntrin.
313 static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
314 bool ReturnBool) {
315 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
316 llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);
318 llvm::IntegerType *IntType = llvm::IntegerType::get(
319 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
321 Value *Args[3];
322 Args[0] = DestPtr;
323 Args[1] = CGF.EmitScalarExpr(E->getArg(1));
324 llvm::Type *ValueType = Args[1]->getType();
325 Args[1] = EmitToInt(CGF, Args[1], T, IntType);
326 Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
328 Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
329 Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
330 llvm::AtomicOrdering::SequentiallyConsistent);
331 if (ReturnBool)
332 // Extract boolean success flag and zext it to int.
333 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
334 CGF.ConvertType(E->getType()));
335 else
336 // Extract old value and emit it using the same type as compare value.
337 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
338 ValueType);
341 /// This function should be invoked to emit atomic cmpxchg for Microsoft's
342 /// _InterlockedCompareExchange* intrinsics which have the following signature:
343 /// T _InterlockedCompareExchange(T volatile *Destination,
344 /// T Exchange,
345 /// T Comparand);
347 /// Whereas the llvm 'cmpxchg' instruction has the following syntax:
348 /// cmpxchg *Destination, Comparand, Exchange.
349 /// So we need to swap Comparand and Exchange when invoking
350 /// CreateAtomicCmpXchg. That is the reason we could not use the above utility
351 /// function MakeAtomicCmpXchgValue since it expects the arguments to be
352 /// already swapped.
354 static
355 Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
356 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
357 assert(E->getArg(0)->getType()->isPointerType());
358 assert(CGF.getContext().hasSameUnqualifiedType(
359 E->getType(), E->getArg(0)->getType()->getPointeeType()));
360 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
361 E->getArg(1)->getType()));
362 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
363 E->getArg(2)->getType()));
365 auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
366 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
367 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
369 // For Release ordering, the failure ordering should be Monotonic.
370 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
371 AtomicOrdering::Monotonic :
372 SuccessOrdering;
374 // The atomic instruction is marked volatile for consistency with MSVC. This
375 // blocks the few atomics optimizations that LLVM has. If we want to optimize
376 // _Interlocked* operations in the future, we will have to remove the volatile
377 // marker.
378 auto *Result = CGF.Builder.CreateAtomicCmpXchg(
379 Destination, Comparand, Exchange,
380 SuccessOrdering, FailureOrdering);
381 Result->setVolatile(true);
382 return CGF.Builder.CreateExtractValue(Result, 0);
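// Illustrative sketch of the operand swap described above: a call such as
//   _InterlockedCompareExchange(&Dest, /*Exchange=*/New, /*Comparand=*/Old)
// is emitted roughly as
//   %pair = cmpxchg volatile ptr %Dest, i32 %Old, i32 %New seq_cst seq_cst
//   %prev = extractvalue { i32, i1 } %pair, 0   ; value returned to the caller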
385 // 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
386 // prototyped like this:
388 // unsigned char _InterlockedCompareExchange128...(
389 // __int64 volatile * _Destination,
390 // __int64 _ExchangeHigh,
391 // __int64 _ExchangeLow,
392 // __int64 * _ComparandResult);
393 static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
394 const CallExpr *E,
395 AtomicOrdering SuccessOrdering) {
396 assert(E->getNumArgs() == 4);
397 llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
398 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
399 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
400 llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
402 assert(Destination->getType()->isPointerTy());
403 assert(!ExchangeHigh->getType()->isPointerTy());
404 assert(!ExchangeLow->getType()->isPointerTy());
405 assert(ComparandPtr->getType()->isPointerTy());
407 // For Release ordering, the failure ordering should be Monotonic.
408 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
409 ? AtomicOrdering::Monotonic
410 : SuccessOrdering;
412 // Convert to i128 pointers and values.
413 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
414 Address ComparandResult(ComparandPtr, Int128Ty,
415 CGF.getContext().toCharUnitsFromBits(128));
417 // (((i128)hi) << 64) | ((i128)lo)
418 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
419 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
420 ExchangeHigh =
421 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
422 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
424 // Load the comparand for the instruction.
425 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
427 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
428 SuccessOrdering, FailureOrdering);
430 // The atomic instruction is marked volatile for consistency with MSVC. This
431 // blocks the few atomics optimizations that LLVM has. If we want to optimize
432 // _Interlocked* operations in the future, we will have to remove the volatile
433 // marker.
434 CXI->setVolatile(true);
436 // Store the result as an outparameter.
437 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
438 ComparandResult);
440 // Get the success boolean and zero extend it to i8.
441 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
442 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
445 static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
446 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
447 assert(E->getArg(0)->getType()->isPointerType());
449 auto *IntTy = CGF.ConvertType(E->getType());
450 auto *Result = CGF.Builder.CreateAtomicRMW(
451 AtomicRMWInst::Add,
452 CGF.EmitScalarExpr(E->getArg(0)),
453 ConstantInt::get(IntTy, 1),
454 Ordering);
455 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
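// Illustrative sketch: an _InterlockedIncrement-style builtin returns the
// *new* value, so the helper above emits roughly
//   %old = atomicrmw add ptr %p, i32 1 seq_cst
//   %new = add i32 %old, 1        ; returned to the caller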
458 static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
459 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
460 assert(E->getArg(0)->getType()->isPointerType());
462 auto *IntTy = CGF.ConvertType(E->getType());
463 auto *Result = CGF.Builder.CreateAtomicRMW(
464 AtomicRMWInst::Sub,
465 CGF.EmitScalarExpr(E->getArg(0)),
466 ConstantInt::get(IntTy, 1),
467 Ordering);
468 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
471 // Build a plain volatile load.
472 static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
473 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
474 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
475 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
476 llvm::Type *ITy =
477 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
478 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
479 Load->setVolatile(true);
480 return Load;
483 // Build a plain volatile store.
484 static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
485 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
486 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
487 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
488 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
489 llvm::StoreInst *Store =
490 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
491 Store->setVolatile(true);
492 return Store;
495 // Emit a simple mangled intrinsic that has 1 argument and a return type
496 // matching the argument type. Depending on mode, this may be a constrained
497 // floating-point intrinsic.
498 static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
499 const CallExpr *E, unsigned IntrinsicID,
500 unsigned ConstrainedIntrinsicID) {
501 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
503 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
504 if (CGF.Builder.getIsFPConstrained()) {
505 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
506 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
507 } else {
508 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
509 return CGF.Builder.CreateCall(F, Src0);
513 // Emit an intrinsic that has 2 operands of the same type as its result.
514 // Depending on mode, this may be a constrained floating-point intrinsic.
515 static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
516 const CallExpr *E, unsigned IntrinsicID,
517 unsigned ConstrainedIntrinsicID) {
518 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
519 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
521 if (CGF.Builder.getIsFPConstrained()) {
522 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
523 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
524 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
525 } else {
526 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
527 return CGF.Builder.CreateCall(F, { Src0, Src1 });
531 // Like the above, but the intrinsic is also mangled on the second argument's type.
532 static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
533 CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
534 llvm::Intrinsic::ID ConstrainedIntrinsicID) {
535 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
536 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
538 if (CGF.Builder.getIsFPConstrained()) {
539 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
540 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
541 {Src0->getType(), Src1->getType()});
542 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
545 Function *F =
546 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
547 return CGF.Builder.CreateCall(F, {Src0, Src1});
550 // Emit an intrinsic that has 3 operands of the same type as its result.
551 // Depending on mode, this may be a constrained floating-point intrinsic.
552 static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
553 const CallExpr *E, unsigned IntrinsicID,
554 unsigned ConstrainedIntrinsicID) {
555 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
556 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
557 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
559 if (CGF.Builder.getIsFPConstrained()) {
560 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
561 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
562 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
563 } else {
564 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
565 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
569 // Emit an intrinsic where all operands are of the same type as the result.
570 // Depending on mode, this may be a constrained floating-point intrinsic.
571 static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
572 unsigned IntrinsicID,
573 unsigned ConstrainedIntrinsicID,
574 llvm::Type *Ty,
575 ArrayRef<Value *> Args) {
576 Function *F;
577 if (CGF.Builder.getIsFPConstrained())
578 F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
579 else
580 F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
582 if (CGF.Builder.getIsFPConstrained())
583 return CGF.Builder.CreateConstrainedFPCall(F, Args);
584 else
585 return CGF.Builder.CreateCall(F, Args);
588 // Emit a simple mangled intrinsic that has 1 argument and a return type
589 // matching the argument type.
590 static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
591 unsigned IntrinsicID,
592 llvm::StringRef Name = "") {
593 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
595 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
596 return CGF.Builder.CreateCall(F, Src0, Name);
599 // Emit an intrinsic that has 2 operands of the same type as its result.
600 static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
601 const CallExpr *E,
602 unsigned IntrinsicID) {
603 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
604 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
606 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
607 return CGF.Builder.CreateCall(F, { Src0, Src1 });
610 // Emit an intrinsic that has 3 operands of the same type as its result.
611 static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
612 const CallExpr *E,
613 unsigned IntrinsicID) {
614 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
615 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
616 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
618 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
619 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
622 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
623 static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
624 const CallExpr *E,
625 unsigned IntrinsicID) {
626 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
627 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
629 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
630 return CGF.Builder.CreateCall(F, {Src0, Src1});
633 // Emit an intrinsic that has overloaded integer result and fp operand.
634 static Value *
635 emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
636 unsigned IntrinsicID,
637 unsigned ConstrainedIntrinsicID) {
638 llvm::Type *ResultType = CGF.ConvertType(E->getType());
639 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
641 if (CGF.Builder.getIsFPConstrained()) {
642 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
643 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
644 {ResultType, Src0->getType()});
645 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
646 } else {
647 Function *F =
648 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
649 return CGF.Builder.CreateCall(F, Src0);
653 static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
654 llvm::Intrinsic::ID IntrinsicID) {
655 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
656 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
658 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
659 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
660 llvm::Function *F =
661 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
662 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
664 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
665 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
666 CGF.EmitStoreOfScalar(Exp, LV);
668 return CGF.Builder.CreateExtractValue(Call, 0);
671 /// EmitFAbs - Emit a call to @llvm.fabs().
672 static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
673 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
674 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
675 Call->setDoesNotAccessMemory();
676 return Call;
679 /// Emit the computation of the sign bit for a floating point value. Returns
680 /// the i1 sign bit value.
681 static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
682 LLVMContext &C = CGF.CGM.getLLVMContext();
684 llvm::Type *Ty = V->getType();
685 int Width = Ty->getPrimitiveSizeInBits();
686 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
687 V = CGF.Builder.CreateBitCast(V, IntTy);
688 if (Ty->isPPC_FP128Ty()) {
689 // We want the sign bit of the higher-order double. The bitcast we just
690 // did works as if the double-double was stored to memory and then
691 // read as an i128. The "store" will put the higher-order double in the
692 // lower address in both little- and big-Endian modes, but the "load"
693 // will treat those bits as a different part of the i128: the low bits in
694 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
695 // we need to shift the high bits down to the low before truncating.
696 Width >>= 1;
697 if (CGF.getTarget().isBigEndian()) {
698 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
699 V = CGF.Builder.CreateLShr(V, ShiftCst);
701 // We are truncating value in order to extract the higher-order
702 // double, which we will be using to extract the sign from.
703 IntTy = llvm::IntegerType::get(C, Width);
704 V = CGF.Builder.CreateTrunc(V, IntTy);
706 Value *Zero = llvm::Constant::getNullValue(IntTy);
707 return CGF.Builder.CreateICmpSLT(V, Zero);
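// Illustrative sketch: for a plain 'double' the sign bit is computed roughly
// as
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0
// (the ppc_fp128 path above first narrows to the higher-order double).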
710 static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
711 const CallExpr *E, llvm::Constant *calleeValue) {
712 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
713 return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
716 /// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
717 /// depending on IntrinsicID.
719 /// \arg CGF The current codegen function.
720 /// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
721 /// \arg X The first argument to the llvm.*.with.overflow.*.
722 /// \arg Y The second argument to the llvm.*.with.overflow.*.
723 /// \arg Carry The carry returned by the llvm.*.with.overflow.*.
724 /// \returns The result (i.e. sum/product) returned by the intrinsic.
725 static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
726 const llvm::Intrinsic::ID IntrinsicID,
727 llvm::Value *X, llvm::Value *Y,
728 llvm::Value *&Carry) {
729 // Make sure we have integers of the same width.
730 assert(X->getType() == Y->getType() &&
731 "Arguments must be the same type. (Did you forget to make sure both "
732 "arguments have the same integer width?)");
734 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
735 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
736 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
737 return CGF.Builder.CreateExtractValue(Tmp, 0);
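// Illustrative sketch: with IntrinsicID = llvm::Intrinsic::sadd_with_overflow
// on i32 operands, the helper above emits roughly
//   %res   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue { i32, i1 } %res, 0
//   %carry = extractvalue { i32, i1 } %res, 1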
740 static Value *emitRangedBuiltin(CodeGenFunction &CGF,
741 unsigned IntrinsicID,
742 int low, int high) {
743 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
744 llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
745 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
746 llvm::Instruction *Call = CGF.Builder.CreateCall(F);
747 Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
748 Call->setMetadata(llvm::LLVMContext::MD_noundef,
749 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
750 return Call;
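// Illustrative sketch: emitRangedBuiltin(CGF, ID, 0, 1024) attaches metadata
// roughly of the form
//   %r = call i32 @<intrinsic>(), !range !{i32 0, i32 1024}, !noundef !{}
// constraining the result to [0, 1024) and marking it noundef.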
753 namespace {
754 struct WidthAndSignedness {
755 unsigned Width;
756 bool Signed;
760 static WidthAndSignedness
761 getIntegerWidthAndSignedness(const clang::ASTContext &context,
762 const clang::QualType Type) {
763 assert(Type->isIntegerType() && "Given type is not an integer.");
764 unsigned Width = Type->isBooleanType() ? 1
765 : Type->isBitIntType() ? context.getIntWidth(Type)
766 : context.getTypeInfo(Type).Width;
767 bool Signed = Type->isSignedIntegerType();
768 return {Width, Signed};
771 // Given one or more integer types, this function produces an integer type that
772 // encompasses them: any value in one of the given types could be expressed in
773 // the encompassing type.
774 static struct WidthAndSignedness
775 EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
776 assert(Types.size() > 0 && "Empty list of types.");
778 // If any of the given types is signed, we must return a signed type.
779 bool Signed = false;
780 for (const auto &Type : Types) {
781 Signed |= Type.Signed;
784 // The encompassing type must have a width greater than or equal to the width
785 // of the specified types. Additionally, if the encompassing type is signed,
786 // its width must be strictly greater than the width of any unsigned types
787 // given.
788 unsigned Width = 0;
789 for (const auto &Type : Types) {
790 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
791 if (Width < MinWidth) {
792 Width = MinWidth;
796 return {Width, Signed};
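// Illustrative worked example: encompassing an 'unsigned int' {32, unsigned}
// and an 'int' {32, signed} yields {Width = 33, Signed = true}, since a
// signed result needs one extra bit to cover every unsigned 32-bit value.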
799 Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
800 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
801 return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
804 /// Checks if using the result of __builtin_object_size(p, @p From) in place of
805 /// __builtin_object_size(p, @p To) is correct
806 static bool areBOSTypesCompatible(int From, int To) {
807 // Note: Our __builtin_object_size implementation currently treats Type=0 and
808 // Type=2 identically. Encoding this implementation detail here may make
809 // improving __builtin_object_size difficult in the future, so it's omitted.
810 return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
813 static llvm::Value *
814 getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
815 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
818 llvm::Value *
819 CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
820 llvm::IntegerType *ResType,
821 llvm::Value *EmittedE,
822 bool IsDynamic) {
823 uint64_t ObjectSize;
824 if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
825 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
826 return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
829 /// Returns a Value corresponding to the size of the given expression.
830 /// This Value may be either of the following:
831 /// - A llvm::Argument (if E is a param with the pass_object_size attribute on
832 /// it)
833 /// - A call to the @llvm.objectsize intrinsic
835 /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
836 /// and we wouldn't otherwise try to reference a pass_object_size parameter,
837 /// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
838 llvm::Value *
839 CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
840 llvm::IntegerType *ResType,
841 llvm::Value *EmittedE, bool IsDynamic) {
842 // We need to reference an argument if the pointer is a parameter with the
843 // pass_object_size attribute.
844 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
845 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
846 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
847 if (Param != nullptr && PS != nullptr &&
848 areBOSTypesCompatible(PS->getType(), Type)) {
849 auto Iter = SizeArguments.find(Param);
850 assert(Iter != SizeArguments.end());
852 const ImplicitParamDecl *D = Iter->second;
853 auto DIter = LocalDeclMap.find(D);
854 assert(DIter != LocalDeclMap.end());
856 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
857 getContext().getSizeType(), E->getBeginLoc());
861 if (IsDynamic) {
862 LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
863 getLangOpts().getStrictFlexArraysLevel();
864 const Expr *Base = E->IgnoreParenImpCasts();
866 if (FieldDecl *FD = FindCountedByField(Base, StrictFlexArraysLevel)) {
867 const auto *ME = dyn_cast<MemberExpr>(Base);
868 llvm::Value *ObjectSize = nullptr;
870 if (!ME) {
871 const auto *DRE = dyn_cast<DeclRefExpr>(Base);
872 ValueDecl *VD = nullptr;
874 ObjectSize = ConstantInt::get(
875 ResType,
876 getContext().getTypeSize(DRE->getType()->getPointeeType()) / 8,
877 true);
879 if (auto *RD = DRE->getType()->getPointeeType()->getAsRecordDecl())
880 VD = RD->getLastField();
882 Expr *ICE = ImplicitCastExpr::Create(
883 getContext(), DRE->getType(), CK_LValueToRValue,
884 const_cast<Expr *>(cast<Expr>(DRE)), nullptr, VK_PRValue,
885 FPOptionsOverride());
886 ME = MemberExpr::CreateImplicit(getContext(), ICE, true, VD,
887 VD->getType(), VK_LValue, OK_Ordinary);
890 // At this point, we know that \p ME is a flexible array member.
891 const auto *ArrayTy = getContext().getAsArrayType(ME->getType());
892 unsigned Size = getContext().getTypeSize(ArrayTy->getElementType());
894 llvm::Value *CountField =
895 EmitAnyExprToTemp(MemberExpr::CreateImplicit(
896 getContext(), const_cast<Expr *>(ME->getBase()),
897 ME->isArrow(), FD, FD->getType(), VK_LValue,
898 OK_Ordinary))
899 .getScalarVal();
901 llvm::Value *Mul = Builder.CreateMul(
902 CountField, llvm::ConstantInt::get(CountField->getType(), Size / 8));
903 Mul = Builder.CreateZExtOrTrunc(Mul, ResType);
905 if (ObjectSize)
906 return Builder.CreateAdd(ObjectSize, Mul);
908 return Mul;
912 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
913 // evaluate E for side-effects. In either case, we shouldn't lower to
914 // @llvm.objectsize.
915 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
916 return getDefaultBuiltinObjectSizeResult(Type, ResType);
918 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
919 assert(Ptr->getType()->isPointerTy() &&
920 "Non-pointer passed to __builtin_object_size?");
922 Function *F =
923 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
925 // LLVM only supports 0 and 2; make sure we pass that along as a boolean.
926 Value *Min = Builder.getInt1((Type & 2) != 0);
927 // For GCC compatibility, __builtin_object_size treats NULL as an unknown size.
928 Value *NullIsUnknown = Builder.getTrue();
929 Value *Dynamic = Builder.getInt1(IsDynamic);
930 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
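// Illustrative sketch: for __builtin_object_size(p, 0) the fallback path
// above emits roughly
//   %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)
// i.e. Min = false (Type & 2 == 0), NullIsUnknown = true, Dynamic = false.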
933 namespace {
934 /// A struct to generically describe a bit test intrinsic.
935 struct BitTest {
936 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
937 enum InterlockingKind : uint8_t {
938 Unlocked,
939 Sequential,
940 Acquire,
941 Release,
942 NoFence
945 ActionKind Action;
946 InterlockingKind Interlocking;
947 bool Is64Bit;
949 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
951 } // namespace
953 BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
954 switch (BuiltinID) {
955 // Main portable variants.
956 case Builtin::BI_bittest:
957 return {TestOnly, Unlocked, false};
958 case Builtin::BI_bittestandcomplement:
959 return {Complement, Unlocked, false};
960 case Builtin::BI_bittestandreset:
961 return {Reset, Unlocked, false};
962 case Builtin::BI_bittestandset:
963 return {Set, Unlocked, false};
964 case Builtin::BI_interlockedbittestandreset:
965 return {Reset, Sequential, false};
966 case Builtin::BI_interlockedbittestandset:
967 return {Set, Sequential, false};
969 // X86-specific 64-bit variants.
970 case Builtin::BI_bittest64:
971 return {TestOnly, Unlocked, true};
972 case Builtin::BI_bittestandcomplement64:
973 return {Complement, Unlocked, true};
974 case Builtin::BI_bittestandreset64:
975 return {Reset, Unlocked, true};
976 case Builtin::BI_bittestandset64:
977 return {Set, Unlocked, true};
978 case Builtin::BI_interlockedbittestandreset64:
979 return {Reset, Sequential, true};
980 case Builtin::BI_interlockedbittestandset64:
981 return {Set, Sequential, true};
983 // ARM/AArch64-specific ordering variants.
984 case Builtin::BI_interlockedbittestandset_acq:
985 return {Set, Acquire, false};
986 case Builtin::BI_interlockedbittestandset_rel:
987 return {Set, Release, false};
988 case Builtin::BI_interlockedbittestandset_nf:
989 return {Set, NoFence, false};
990 case Builtin::BI_interlockedbittestandreset_acq:
991 return {Reset, Acquire, false};
992 case Builtin::BI_interlockedbittestandreset_rel:
993 return {Reset, Release, false};
994 case Builtin::BI_interlockedbittestandreset_nf:
995 return {Reset, NoFence, false};
997 llvm_unreachable("expected only bittest intrinsics");
1000 static char bitActionToX86BTCode(BitTest::ActionKind A) {
1001 switch (A) {
1002 case BitTest::TestOnly: return '\0';
1003 case BitTest::Complement: return 'c';
1004 case BitTest::Reset: return 'r';
1005 case BitTest::Set: return 's';
1007 llvm_unreachable("invalid action");
1010 static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1011 BitTest BT,
1012 const CallExpr *E, Value *BitBase,
1013 Value *BitPos) {
1014 char Action = bitActionToX86BTCode(BT.Action);
1015 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1017 // Build the assembly.
1018 SmallString<64> Asm;
1019 raw_svector_ostream AsmOS(Asm);
1020 if (BT.Interlocking != BitTest::Unlocked)
1021 AsmOS << "lock ";
1022 AsmOS << "bt";
1023 if (Action)
1024 AsmOS << Action;
1025 AsmOS << SizeSuffix << " $2, ($1)";
1027 // Build the constraints. FIXME: We should support immediates when possible.
1028 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1029 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1030 if (!MachineClobbers.empty()) {
1031 Constraints += ',';
1032 Constraints += MachineClobbers;
1034 llvm::IntegerType *IntType = llvm::IntegerType::get(
1035 CGF.getLLVMContext(),
1036 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1037 llvm::FunctionType *FTy =
1038 llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
1040 llvm::InlineAsm *IA =
1041 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1042 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
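// Illustrative sketch: for _interlockedbittestandset64 the string built above
// is "lock btsq $2, ($1)" with constraints "={@ccc},r,r,~{cc},~{memory}"
// (plus any target machine clobbers); the tested bit comes back through the
// carry flag.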
1045 static llvm::AtomicOrdering
1046 getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1047 switch (I) {
1048 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1049 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1050 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1051 case BitTest::Release: return llvm::AtomicOrdering::Release;
1052 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1054 llvm_unreachable("invalid interlocking");
1057 /// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1058 /// bits and a bit position and read and optionally modify the bit at that
1059 /// position. The position index can be arbitrarily large, i.e. it can be larger
1060 /// than 31 or 63, so we need an indexed load in the general case.
1061 static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1062 unsigned BuiltinID,
1063 const CallExpr *E) {
1064 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1065 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1067 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1069 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1070 // indexing operation internally. Use them if possible.
1071 if (CGF.getTarget().getTriple().isX86())
1072 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1074 // Otherwise, use generic code to load one byte and test the bit. Use all but
1075 // the bottom three bits as the array index, and the bottom three bits to form
1076 // a mask.
1077 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1078 Value *ByteIndex = CGF.Builder.CreateAShr(
1079 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1080 Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
1081 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
1082 ByteIndex, "bittest.byteaddr"),
1083 CGF.Int8Ty, CharUnits::One());
1084 Value *PosLow =
1085 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1086 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1088 // The updating instructions will need a mask.
1089 Value *Mask = nullptr;
1090 if (BT.Action != BitTest::TestOnly) {
1091 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1092 "bittest.mask");
1095 // Check the action and ordering of the interlocked intrinsics.
1096 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1098 Value *OldByte = nullptr;
1099 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1100 // Emit a combined atomicrmw load/store operation for the interlocked
1101 // intrinsics.
1102 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1103 if (BT.Action == BitTest::Reset) {
1104 Mask = CGF.Builder.CreateNot(Mask);
1105 RMWOp = llvm::AtomicRMWInst::And;
1107 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
1108 Ordering);
1109 } else {
1110 // Emit a plain load for the non-interlocked intrinsics.
1111 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1112 Value *NewByte = nullptr;
1113 switch (BT.Action) {
1114 case BitTest::TestOnly:
1115 // Don't store anything.
1116 break;
1117 case BitTest::Complement:
1118 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1119 break;
1120 case BitTest::Reset:
1121 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1122 break;
1123 case BitTest::Set:
1124 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1125 break;
1127 if (NewByte)
1128 CGF.Builder.CreateStore(NewByte, ByteAddr);
1131 // However we loaded the old byte, either by plain load or atomicrmw, shift
1132 // the bit into the low position and mask it to 0 or 1.
1133 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1134 return CGF.Builder.CreateAnd(
1135 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1138 static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
1139 unsigned BuiltinID,
1140 const CallExpr *E) {
1141 Value *Addr = CGF.EmitScalarExpr(E->getArg(0));
1143 SmallString<64> Asm;
1144 raw_svector_ostream AsmOS(Asm);
1145 llvm::IntegerType *RetType = CGF.Int32Ty;
1147 switch (BuiltinID) {
1148 case clang::PPC::BI__builtin_ppc_ldarx:
1149 AsmOS << "ldarx ";
1150 RetType = CGF.Int64Ty;
1151 break;
1152 case clang::PPC::BI__builtin_ppc_lwarx:
1153 AsmOS << "lwarx ";
1154 RetType = CGF.Int32Ty;
1155 break;
1156 case clang::PPC::BI__builtin_ppc_lharx:
1157 AsmOS << "lharx ";
1158 RetType = CGF.Int16Ty;
1159 break;
1160 case clang::PPC::BI__builtin_ppc_lbarx:
1161 AsmOS << "lbarx ";
1162 RetType = CGF.Int8Ty;
1163 break;
1164 default:
1165 llvm_unreachable("Expected only PowerPC load reserve intrinsics");
1168 AsmOS << "$0, ${1:y}";
1170 std::string Constraints = "=r,*Z,~{memory}";
1171 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1172 if (!MachineClobbers.empty()) {
1173 Constraints += ',';
1174 Constraints += MachineClobbers;
1177 llvm::Type *PtrType = CGF.UnqualPtrTy;
1178 llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
1180 llvm::InlineAsm *IA =
1181 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1182 llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
1183 CI->addParamAttr(
1184 0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
1185 return CI;
1188 namespace {
1189 enum class MSVCSetJmpKind {
1190 _setjmpex,
1191 _setjmp3,
1192 _setjmp
1196 /// MSVC handles setjmp a bit differently on different platforms. On every
1197 /// architecture except 32-bit x86, the frame address is passed. On x86, extra
1198 /// parameters can be passed as variadic arguments, but we always pass none.
1199 static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1200 const CallExpr *E) {
1201 llvm::Value *Arg1 = nullptr;
1202 llvm::Type *Arg1Ty = nullptr;
1203 StringRef Name;
1204 bool IsVarArg = false;
1205 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1206 Name = "_setjmp3";
1207 Arg1Ty = CGF.Int32Ty;
1208 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1209 IsVarArg = true;
1210 } else {
1211 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1212 Arg1Ty = CGF.Int8PtrTy;
1213 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1214 Arg1 = CGF.Builder.CreateCall(
1215 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1216 } else
1217 Arg1 = CGF.Builder.CreateCall(
1218 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1219 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1222 // Mark the call site and declaration with ReturnsTwice.
1223 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1224 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1225 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1226 llvm::Attribute::ReturnsTwice);
1227 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1228 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1229 ReturnsTwiceAttr, /*Local=*/true);
1231 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1232 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1233 llvm::Value *Args[] = {Buf, Arg1};
1234 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1235 CB->setAttributes(ReturnsTwiceAttr);
1236 return RValue::get(CB);
1239 // Many MSVC builtins are shared across x64, ARM, and AArch64; to avoid
1240 // repeating code, we handle them here.
1241 enum class CodeGenFunction::MSVCIntrin {
1242 _BitScanForward,
1243 _BitScanReverse,
1244 _InterlockedAnd,
1245 _InterlockedDecrement,
1246 _InterlockedExchange,
1247 _InterlockedExchangeAdd,
1248 _InterlockedExchangeSub,
1249 _InterlockedIncrement,
1250 _InterlockedOr,
1251 _InterlockedXor,
1252 _InterlockedExchangeAdd_acq,
1253 _InterlockedExchangeAdd_rel,
1254 _InterlockedExchangeAdd_nf,
1255 _InterlockedExchange_acq,
1256 _InterlockedExchange_rel,
1257 _InterlockedExchange_nf,
1258 _InterlockedCompareExchange_acq,
1259 _InterlockedCompareExchange_rel,
1260 _InterlockedCompareExchange_nf,
1261 _InterlockedCompareExchange128,
1262 _InterlockedCompareExchange128_acq,
1263 _InterlockedCompareExchange128_rel,
1264 _InterlockedCompareExchange128_nf,
1265 _InterlockedOr_acq,
1266 _InterlockedOr_rel,
1267 _InterlockedOr_nf,
1268 _InterlockedXor_acq,
1269 _InterlockedXor_rel,
1270 _InterlockedXor_nf,
1271 _InterlockedAnd_acq,
1272 _InterlockedAnd_rel,
1273 _InterlockedAnd_nf,
1274 _InterlockedIncrement_acq,
1275 _InterlockedIncrement_rel,
1276 _InterlockedIncrement_nf,
1277 _InterlockedDecrement_acq,
1278 _InterlockedDecrement_rel,
1279 _InterlockedDecrement_nf,
1280 __fastfail,
1283 static std::optional<CodeGenFunction::MSVCIntrin>
1284 translateArmToMsvcIntrin(unsigned BuiltinID) {
1285 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1286 switch (BuiltinID) {
1287 default:
1288 return std::nullopt;
1289 case clang::ARM::BI_BitScanForward:
1290 case clang::ARM::BI_BitScanForward64:
1291 return MSVCIntrin::_BitScanForward;
1292 case clang::ARM::BI_BitScanReverse:
1293 case clang::ARM::BI_BitScanReverse64:
1294 return MSVCIntrin::_BitScanReverse;
1295 case clang::ARM::BI_InterlockedAnd64:
1296 return MSVCIntrin::_InterlockedAnd;
1297 case clang::ARM::BI_InterlockedExchange64:
1298 return MSVCIntrin::_InterlockedExchange;
1299 case clang::ARM::BI_InterlockedExchangeAdd64:
1300 return MSVCIntrin::_InterlockedExchangeAdd;
1301 case clang::ARM::BI_InterlockedExchangeSub64:
1302 return MSVCIntrin::_InterlockedExchangeSub;
1303 case clang::ARM::BI_InterlockedOr64:
1304 return MSVCIntrin::_InterlockedOr;
1305 case clang::ARM::BI_InterlockedXor64:
1306 return MSVCIntrin::_InterlockedXor;
1307 case clang::ARM::BI_InterlockedDecrement64:
1308 return MSVCIntrin::_InterlockedDecrement;
1309 case clang::ARM::BI_InterlockedIncrement64:
1310 return MSVCIntrin::_InterlockedIncrement;
1311 case clang::ARM::BI_InterlockedExchangeAdd8_acq:
1312 case clang::ARM::BI_InterlockedExchangeAdd16_acq:
1313 case clang::ARM::BI_InterlockedExchangeAdd_acq:
1314 case clang::ARM::BI_InterlockedExchangeAdd64_acq:
1315 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1316 case clang::ARM::BI_InterlockedExchangeAdd8_rel:
1317 case clang::ARM::BI_InterlockedExchangeAdd16_rel:
1318 case clang::ARM::BI_InterlockedExchangeAdd_rel:
1319 case clang::ARM::BI_InterlockedExchangeAdd64_rel:
1320 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1321 case clang::ARM::BI_InterlockedExchangeAdd8_nf:
1322 case clang::ARM::BI_InterlockedExchangeAdd16_nf:
1323 case clang::ARM::BI_InterlockedExchangeAdd_nf:
1324 case clang::ARM::BI_InterlockedExchangeAdd64_nf:
1325 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1326 case clang::ARM::BI_InterlockedExchange8_acq:
1327 case clang::ARM::BI_InterlockedExchange16_acq:
1328 case clang::ARM::BI_InterlockedExchange_acq:
1329 case clang::ARM::BI_InterlockedExchange64_acq:
1330 return MSVCIntrin::_InterlockedExchange_acq;
1331 case clang::ARM::BI_InterlockedExchange8_rel:
1332 case clang::ARM::BI_InterlockedExchange16_rel:
1333 case clang::ARM::BI_InterlockedExchange_rel:
1334 case clang::ARM::BI_InterlockedExchange64_rel:
1335 return MSVCIntrin::_InterlockedExchange_rel;
1336 case clang::ARM::BI_InterlockedExchange8_nf:
1337 case clang::ARM::BI_InterlockedExchange16_nf:
1338 case clang::ARM::BI_InterlockedExchange_nf:
1339 case clang::ARM::BI_InterlockedExchange64_nf:
1340 return MSVCIntrin::_InterlockedExchange_nf;
1341 case clang::ARM::BI_InterlockedCompareExchange8_acq:
1342 case clang::ARM::BI_InterlockedCompareExchange16_acq:
1343 case clang::ARM::BI_InterlockedCompareExchange_acq:
1344 case clang::ARM::BI_InterlockedCompareExchange64_acq:
1345 return MSVCIntrin::_InterlockedCompareExchange_acq;
1346 case clang::ARM::BI_InterlockedCompareExchange8_rel:
1347 case clang::ARM::BI_InterlockedCompareExchange16_rel:
1348 case clang::ARM::BI_InterlockedCompareExchange_rel:
1349 case clang::ARM::BI_InterlockedCompareExchange64_rel:
1350 return MSVCIntrin::_InterlockedCompareExchange_rel;
1351 case clang::ARM::BI_InterlockedCompareExchange8_nf:
1352 case clang::ARM::BI_InterlockedCompareExchange16_nf:
1353 case clang::ARM::BI_InterlockedCompareExchange_nf:
1354 case clang::ARM::BI_InterlockedCompareExchange64_nf:
1355 return MSVCIntrin::_InterlockedCompareExchange_nf;
1356 case clang::ARM::BI_InterlockedOr8_acq:
1357 case clang::ARM::BI_InterlockedOr16_acq:
1358 case clang::ARM::BI_InterlockedOr_acq:
1359 case clang::ARM::BI_InterlockedOr64_acq:
1360 return MSVCIntrin::_InterlockedOr_acq;
1361 case clang::ARM::BI_InterlockedOr8_rel:
1362 case clang::ARM::BI_InterlockedOr16_rel:
1363 case clang::ARM::BI_InterlockedOr_rel:
1364 case clang::ARM::BI_InterlockedOr64_rel:
1365 return MSVCIntrin::_InterlockedOr_rel;
1366 case clang::ARM::BI_InterlockedOr8_nf:
1367 case clang::ARM::BI_InterlockedOr16_nf:
1368 case clang::ARM::BI_InterlockedOr_nf:
1369 case clang::ARM::BI_InterlockedOr64_nf:
1370 return MSVCIntrin::_InterlockedOr_nf;
1371 case clang::ARM::BI_InterlockedXor8_acq:
1372 case clang::ARM::BI_InterlockedXor16_acq:
1373 case clang::ARM::BI_InterlockedXor_acq:
1374 case clang::ARM::BI_InterlockedXor64_acq:
1375 return MSVCIntrin::_InterlockedXor_acq;
1376 case clang::ARM::BI_InterlockedXor8_rel:
1377 case clang::ARM::BI_InterlockedXor16_rel:
1378 case clang::ARM::BI_InterlockedXor_rel:
1379 case clang::ARM::BI_InterlockedXor64_rel:
1380 return MSVCIntrin::_InterlockedXor_rel;
1381 case clang::ARM::BI_InterlockedXor8_nf:
1382 case clang::ARM::BI_InterlockedXor16_nf:
1383 case clang::ARM::BI_InterlockedXor_nf:
1384 case clang::ARM::BI_InterlockedXor64_nf:
1385 return MSVCIntrin::_InterlockedXor_nf;
1386 case clang::ARM::BI_InterlockedAnd8_acq:
1387 case clang::ARM::BI_InterlockedAnd16_acq:
1388 case clang::ARM::BI_InterlockedAnd_acq:
1389 case clang::ARM::BI_InterlockedAnd64_acq:
1390 return MSVCIntrin::_InterlockedAnd_acq;
1391 case clang::ARM::BI_InterlockedAnd8_rel:
1392 case clang::ARM::BI_InterlockedAnd16_rel:
1393 case clang::ARM::BI_InterlockedAnd_rel:
1394 case clang::ARM::BI_InterlockedAnd64_rel:
1395 return MSVCIntrin::_InterlockedAnd_rel;
1396 case clang::ARM::BI_InterlockedAnd8_nf:
1397 case clang::ARM::BI_InterlockedAnd16_nf:
1398 case clang::ARM::BI_InterlockedAnd_nf:
1399 case clang::ARM::BI_InterlockedAnd64_nf:
1400 return MSVCIntrin::_InterlockedAnd_nf;
1401 case clang::ARM::BI_InterlockedIncrement16_acq:
1402 case clang::ARM::BI_InterlockedIncrement_acq:
1403 case clang::ARM::BI_InterlockedIncrement64_acq:
1404 return MSVCIntrin::_InterlockedIncrement_acq;
1405 case clang::ARM::BI_InterlockedIncrement16_rel:
1406 case clang::ARM::BI_InterlockedIncrement_rel:
1407 case clang::ARM::BI_InterlockedIncrement64_rel:
1408 return MSVCIntrin::_InterlockedIncrement_rel;
1409 case clang::ARM::BI_InterlockedIncrement16_nf:
1410 case clang::ARM::BI_InterlockedIncrement_nf:
1411 case clang::ARM::BI_InterlockedIncrement64_nf:
1412 return MSVCIntrin::_InterlockedIncrement_nf;
1413 case clang::ARM::BI_InterlockedDecrement16_acq:
1414 case clang::ARM::BI_InterlockedDecrement_acq:
1415 case clang::ARM::BI_InterlockedDecrement64_acq:
1416 return MSVCIntrin::_InterlockedDecrement_acq;
1417 case clang::ARM::BI_InterlockedDecrement16_rel:
1418 case clang::ARM::BI_InterlockedDecrement_rel:
1419 case clang::ARM::BI_InterlockedDecrement64_rel:
1420 return MSVCIntrin::_InterlockedDecrement_rel;
1421 case clang::ARM::BI_InterlockedDecrement16_nf:
1422 case clang::ARM::BI_InterlockedDecrement_nf:
1423 case clang::ARM::BI_InterlockedDecrement64_nf:
1424 return MSVCIntrin::_InterlockedDecrement_nf;
1425 }
1426 llvm_unreachable("must return from switch");
1427 }
1429 static std::optional<CodeGenFunction::MSVCIntrin>
1430 translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1431 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1432 switch (BuiltinID) {
1433 default:
1434 return std::nullopt;
1435 case clang::AArch64::BI_BitScanForward:
1436 case clang::AArch64::BI_BitScanForward64:
1437 return MSVCIntrin::_BitScanForward;
1438 case clang::AArch64::BI_BitScanReverse:
1439 case clang::AArch64::BI_BitScanReverse64:
1440 return MSVCIntrin::_BitScanReverse;
1441 case clang::AArch64::BI_InterlockedAnd64:
1442 return MSVCIntrin::_InterlockedAnd;
1443 case clang::AArch64::BI_InterlockedExchange64:
1444 return MSVCIntrin::_InterlockedExchange;
1445 case clang::AArch64::BI_InterlockedExchangeAdd64:
1446 return MSVCIntrin::_InterlockedExchangeAdd;
1447 case clang::AArch64::BI_InterlockedExchangeSub64:
1448 return MSVCIntrin::_InterlockedExchangeSub;
1449 case clang::AArch64::BI_InterlockedOr64:
1450 return MSVCIntrin::_InterlockedOr;
1451 case clang::AArch64::BI_InterlockedXor64:
1452 return MSVCIntrin::_InterlockedXor;
1453 case clang::AArch64::BI_InterlockedDecrement64:
1454 return MSVCIntrin::_InterlockedDecrement;
1455 case clang::AArch64::BI_InterlockedIncrement64:
1456 return MSVCIntrin::_InterlockedIncrement;
1457 case clang::AArch64::BI_InterlockedExchangeAdd8_acq:
1458 case clang::AArch64::BI_InterlockedExchangeAdd16_acq:
1459 case clang::AArch64::BI_InterlockedExchangeAdd_acq:
1460 case clang::AArch64::BI_InterlockedExchangeAdd64_acq:
1461 return MSVCIntrin::_InterlockedExchangeAdd_acq;
1462 case clang::AArch64::BI_InterlockedExchangeAdd8_rel:
1463 case clang::AArch64::BI_InterlockedExchangeAdd16_rel:
1464 case clang::AArch64::BI_InterlockedExchangeAdd_rel:
1465 case clang::AArch64::BI_InterlockedExchangeAdd64_rel:
1466 return MSVCIntrin::_InterlockedExchangeAdd_rel;
1467 case clang::AArch64::BI_InterlockedExchangeAdd8_nf:
1468 case clang::AArch64::BI_InterlockedExchangeAdd16_nf:
1469 case clang::AArch64::BI_InterlockedExchangeAdd_nf:
1470 case clang::AArch64::BI_InterlockedExchangeAdd64_nf:
1471 return MSVCIntrin::_InterlockedExchangeAdd_nf;
1472 case clang::AArch64::BI_InterlockedExchange8_acq:
1473 case clang::AArch64::BI_InterlockedExchange16_acq:
1474 case clang::AArch64::BI_InterlockedExchange_acq:
1475 case clang::AArch64::BI_InterlockedExchange64_acq:
1476 return MSVCIntrin::_InterlockedExchange_acq;
1477 case clang::AArch64::BI_InterlockedExchange8_rel:
1478 case clang::AArch64::BI_InterlockedExchange16_rel:
1479 case clang::AArch64::BI_InterlockedExchange_rel:
1480 case clang::AArch64::BI_InterlockedExchange64_rel:
1481 return MSVCIntrin::_InterlockedExchange_rel;
1482 case clang::AArch64::BI_InterlockedExchange8_nf:
1483 case clang::AArch64::BI_InterlockedExchange16_nf:
1484 case clang::AArch64::BI_InterlockedExchange_nf:
1485 case clang::AArch64::BI_InterlockedExchange64_nf:
1486 return MSVCIntrin::_InterlockedExchange_nf;
1487 case clang::AArch64::BI_InterlockedCompareExchange8_acq:
1488 case clang::AArch64::BI_InterlockedCompareExchange16_acq:
1489 case clang::AArch64::BI_InterlockedCompareExchange_acq:
1490 case clang::AArch64::BI_InterlockedCompareExchange64_acq:
1491 return MSVCIntrin::_InterlockedCompareExchange_acq;
1492 case clang::AArch64::BI_InterlockedCompareExchange8_rel:
1493 case clang::AArch64::BI_InterlockedCompareExchange16_rel:
1494 case clang::AArch64::BI_InterlockedCompareExchange_rel:
1495 case clang::AArch64::BI_InterlockedCompareExchange64_rel:
1496 return MSVCIntrin::_InterlockedCompareExchange_rel;
1497 case clang::AArch64::BI_InterlockedCompareExchange8_nf:
1498 case clang::AArch64::BI_InterlockedCompareExchange16_nf:
1499 case clang::AArch64::BI_InterlockedCompareExchange_nf:
1500 case clang::AArch64::BI_InterlockedCompareExchange64_nf:
1501 return MSVCIntrin::_InterlockedCompareExchange_nf;
1502 case clang::AArch64::BI_InterlockedCompareExchange128:
1503 return MSVCIntrin::_InterlockedCompareExchange128;
1504 case clang::AArch64::BI_InterlockedCompareExchange128_acq:
1505 return MSVCIntrin::_InterlockedCompareExchange128_acq;
1506 case clang::AArch64::BI_InterlockedCompareExchange128_nf:
1507 return MSVCIntrin::_InterlockedCompareExchange128_nf;
1508 case clang::AArch64::BI_InterlockedCompareExchange128_rel:
1509 return MSVCIntrin::_InterlockedCompareExchange128_rel;
1510 case clang::AArch64::BI_InterlockedOr8_acq:
1511 case clang::AArch64::BI_InterlockedOr16_acq:
1512 case clang::AArch64::BI_InterlockedOr_acq:
1513 case clang::AArch64::BI_InterlockedOr64_acq:
1514 return MSVCIntrin::_InterlockedOr_acq;
1515 case clang::AArch64::BI_InterlockedOr8_rel:
1516 case clang::AArch64::BI_InterlockedOr16_rel:
1517 case clang::AArch64::BI_InterlockedOr_rel:
1518 case clang::AArch64::BI_InterlockedOr64_rel:
1519 return MSVCIntrin::_InterlockedOr_rel;
1520 case clang::AArch64::BI_InterlockedOr8_nf:
1521 case clang::AArch64::BI_InterlockedOr16_nf:
1522 case clang::AArch64::BI_InterlockedOr_nf:
1523 case clang::AArch64::BI_InterlockedOr64_nf:
1524 return MSVCIntrin::_InterlockedOr_nf;
1525 case clang::AArch64::BI_InterlockedXor8_acq:
1526 case clang::AArch64::BI_InterlockedXor16_acq:
1527 case clang::AArch64::BI_InterlockedXor_acq:
1528 case clang::AArch64::BI_InterlockedXor64_acq:
1529 return MSVCIntrin::_InterlockedXor_acq;
1530 case clang::AArch64::BI_InterlockedXor8_rel:
1531 case clang::AArch64::BI_InterlockedXor16_rel:
1532 case clang::AArch64::BI_InterlockedXor_rel:
1533 case clang::AArch64::BI_InterlockedXor64_rel:
1534 return MSVCIntrin::_InterlockedXor_rel;
1535 case clang::AArch64::BI_InterlockedXor8_nf:
1536 case clang::AArch64::BI_InterlockedXor16_nf:
1537 case clang::AArch64::BI_InterlockedXor_nf:
1538 case clang::AArch64::BI_InterlockedXor64_nf:
1539 return MSVCIntrin::_InterlockedXor_nf;
1540 case clang::AArch64::BI_InterlockedAnd8_acq:
1541 case clang::AArch64::BI_InterlockedAnd16_acq:
1542 case clang::AArch64::BI_InterlockedAnd_acq:
1543 case clang::AArch64::BI_InterlockedAnd64_acq:
1544 return MSVCIntrin::_InterlockedAnd_acq;
1545 case clang::AArch64::BI_InterlockedAnd8_rel:
1546 case clang::AArch64::BI_InterlockedAnd16_rel:
1547 case clang::AArch64::BI_InterlockedAnd_rel:
1548 case clang::AArch64::BI_InterlockedAnd64_rel:
1549 return MSVCIntrin::_InterlockedAnd_rel;
1550 case clang::AArch64::BI_InterlockedAnd8_nf:
1551 case clang::AArch64::BI_InterlockedAnd16_nf:
1552 case clang::AArch64::BI_InterlockedAnd_nf:
1553 case clang::AArch64::BI_InterlockedAnd64_nf:
1554 return MSVCIntrin::_InterlockedAnd_nf;
1555 case clang::AArch64::BI_InterlockedIncrement16_acq:
1556 case clang::AArch64::BI_InterlockedIncrement_acq:
1557 case clang::AArch64::BI_InterlockedIncrement64_acq:
1558 return MSVCIntrin::_InterlockedIncrement_acq;
1559 case clang::AArch64::BI_InterlockedIncrement16_rel:
1560 case clang::AArch64::BI_InterlockedIncrement_rel:
1561 case clang::AArch64::BI_InterlockedIncrement64_rel:
1562 return MSVCIntrin::_InterlockedIncrement_rel;
1563 case clang::AArch64::BI_InterlockedIncrement16_nf:
1564 case clang::AArch64::BI_InterlockedIncrement_nf:
1565 case clang::AArch64::BI_InterlockedIncrement64_nf:
1566 return MSVCIntrin::_InterlockedIncrement_nf;
1567 case clang::AArch64::BI_InterlockedDecrement16_acq:
1568 case clang::AArch64::BI_InterlockedDecrement_acq:
1569 case clang::AArch64::BI_InterlockedDecrement64_acq:
1570 return MSVCIntrin::_InterlockedDecrement_acq;
1571 case clang::AArch64::BI_InterlockedDecrement16_rel:
1572 case clang::AArch64::BI_InterlockedDecrement_rel:
1573 case clang::AArch64::BI_InterlockedDecrement64_rel:
1574 return MSVCIntrin::_InterlockedDecrement_rel;
1575 case clang::AArch64::BI_InterlockedDecrement16_nf:
1576 case clang::AArch64::BI_InterlockedDecrement_nf:
1577 case clang::AArch64::BI_InterlockedDecrement64_nf:
1578 return MSVCIntrin::_InterlockedDecrement_nf;
1579 }
1580 llvm_unreachable("must return from switch");
1581 }
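// Same mapping for the MSVC builtins that x86 shares with ARM/AArch64: the
// bit-scan and Interlocked* forms are funneled into the target-independent
// MSVCIntrin values and lowered by EmitMSVCBuiltinExpr below.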
1583 static std::optional<CodeGenFunction::MSVCIntrin>
1584 translateX86ToMsvcIntrin(unsigned BuiltinID) {
1585 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1586 switch (BuiltinID) {
1587 default:
1588 return std::nullopt;
1589 case clang::X86::BI_BitScanForward:
1590 case clang::X86::BI_BitScanForward64:
1591 return MSVCIntrin::_BitScanForward;
1592 case clang::X86::BI_BitScanReverse:
1593 case clang::X86::BI_BitScanReverse64:
1594 return MSVCIntrin::_BitScanReverse;
1595 case clang::X86::BI_InterlockedAnd64:
1596 return MSVCIntrin::_InterlockedAnd;
1597 case clang::X86::BI_InterlockedCompareExchange128:
1598 return MSVCIntrin::_InterlockedCompareExchange128;
1599 case clang::X86::BI_InterlockedExchange64:
1600 return MSVCIntrin::_InterlockedExchange;
1601 case clang::X86::BI_InterlockedExchangeAdd64:
1602 return MSVCIntrin::_InterlockedExchangeAdd;
1603 case clang::X86::BI_InterlockedExchangeSub64:
1604 return MSVCIntrin::_InterlockedExchangeSub;
1605 case clang::X86::BI_InterlockedOr64:
1606 return MSVCIntrin::_InterlockedOr;
1607 case clang::X86::BI_InterlockedXor64:
1608 return MSVCIntrin::_InterlockedXor;
1609 case clang::X86::BI_InterlockedDecrement64:
1610 return MSVCIntrin::_InterlockedDecrement;
1611 case clang::X86::BI_InterlockedIncrement64:
1612 return MSVCIntrin::_InterlockedIncrement;
1613 }
1614 llvm_unreachable("must return from switch");
1615 }
1617 // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
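// As a rough guide to the lowering below: _InterlockedOr_acq(&Flags, 1) becomes
// an 'atomicrmw or' with acquire ordering, the _rel variants use release
// ordering, and the _nf ("no fence") variants use monotonic ordering.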
1618 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1619 const CallExpr *E) {
1620 switch (BuiltinID) {
1621 case MSVCIntrin::_BitScanForward:
1622 case MSVCIntrin::_BitScanReverse: {
1623 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1624 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1626 llvm::Type *ArgType = ArgValue->getType();
1627 llvm::Type *IndexType = IndexAddress.getElementType();
1628 llvm::Type *ResultType = ConvertType(E->getType());
1630 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1631 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1632 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1634 BasicBlock *Begin = Builder.GetInsertBlock();
1635 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1636 Builder.SetInsertPoint(End);
1637 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1639 Builder.SetInsertPoint(Begin);
1640 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1641 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1642 Builder.CreateCondBr(IsZero, End, NotZero);
1643 Result->addIncoming(ResZero, Begin);
1645 Builder.SetInsertPoint(NotZero);
1647 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1648 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1649 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1650 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1651 Builder.CreateStore(ZeroCount, IndexAddress, false);
1652 } else {
1653 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1654 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1656 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1657 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1658 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1659 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1660 Builder.CreateStore(Index, IndexAddress, false);
1661 }
1662 Builder.CreateBr(End);
1663 Result->addIncoming(ResOne, NotZero);
1665 Builder.SetInsertPoint(End);
1666 return Result;
1667 }
1668 case MSVCIntrin::_InterlockedAnd:
1669 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1670 case MSVCIntrin::_InterlockedExchange:
1671 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1672 case MSVCIntrin::_InterlockedExchangeAdd:
1673 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1674 case MSVCIntrin::_InterlockedExchangeSub:
1675 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1676 case MSVCIntrin::_InterlockedOr:
1677 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1678 case MSVCIntrin::_InterlockedXor:
1679 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1680 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1681 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1682 AtomicOrdering::Acquire);
1683 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1684 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1685 AtomicOrdering::Release);
1686 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1687 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1688 AtomicOrdering::Monotonic);
1689 case MSVCIntrin::_InterlockedExchange_acq:
1690 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1691 AtomicOrdering::Acquire);
1692 case MSVCIntrin::_InterlockedExchange_rel:
1693 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1694 AtomicOrdering::Release);
1695 case MSVCIntrin::_InterlockedExchange_nf:
1696 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1697 AtomicOrdering::Monotonic);
1698 case MSVCIntrin::_InterlockedCompareExchange_acq:
1699 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1700 case MSVCIntrin::_InterlockedCompareExchange_rel:
1701 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1702 case MSVCIntrin::_InterlockedCompareExchange_nf:
1703 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1704 case MSVCIntrin::_InterlockedCompareExchange128:
1705 return EmitAtomicCmpXchg128ForMSIntrin(
1706 *this, E, AtomicOrdering::SequentiallyConsistent);
1707 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1708 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1709 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1710 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1711 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1712 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1713 case MSVCIntrin::_InterlockedOr_acq:
1714 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1715 AtomicOrdering::Acquire);
1716 case MSVCIntrin::_InterlockedOr_rel:
1717 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1718 AtomicOrdering::Release);
1719 case MSVCIntrin::_InterlockedOr_nf:
1720 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1721 AtomicOrdering::Monotonic);
1722 case MSVCIntrin::_InterlockedXor_acq:
1723 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1724 AtomicOrdering::Acquire);
1725 case MSVCIntrin::_InterlockedXor_rel:
1726 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1727 AtomicOrdering::Release);
1728 case MSVCIntrin::_InterlockedXor_nf:
1729 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1730 AtomicOrdering::Monotonic);
1731 case MSVCIntrin::_InterlockedAnd_acq:
1732 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1733 AtomicOrdering::Acquire);
1734 case MSVCIntrin::_InterlockedAnd_rel:
1735 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1736 AtomicOrdering::Release);
1737 case MSVCIntrin::_InterlockedAnd_nf:
1738 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1739 AtomicOrdering::Monotonic);
1740 case MSVCIntrin::_InterlockedIncrement_acq:
1741 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1742 case MSVCIntrin::_InterlockedIncrement_rel:
1743 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1744 case MSVCIntrin::_InterlockedIncrement_nf:
1745 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1746 case MSVCIntrin::_InterlockedDecrement_acq:
1747 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1748 case MSVCIntrin::_InterlockedDecrement_rel:
1749 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1750 case MSVCIntrin::_InterlockedDecrement_nf:
1751 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1753 case MSVCIntrin::_InterlockedDecrement:
1754 return EmitAtomicDecrementValue(*this, E);
1755 case MSVCIntrin::_InterlockedIncrement:
1756 return EmitAtomicIncrementValue(*this, E);
1758 case MSVCIntrin::__fastfail: {
1759 // Request immediate process termination from the kernel. The instruction
1760 // sequences to do this are documented on MSDN:
1761 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
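// On x86, for instance, this emits roughly
//   call void asm sideeffect "int $$0x29", "{cx}"(i32 %code)
// with the call marked noreturn, so control never returns to the caller.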
1762 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1763 StringRef Asm, Constraints;
1764 switch (ISA) {
1765 default:
1766 ErrorUnsupported(E, "__fastfail call for this architecture");
1767 break;
1768 case llvm::Triple::x86:
1769 case llvm::Triple::x86_64:
1770 Asm = "int $$0x29";
1771 Constraints = "{cx}";
1772 break;
1773 case llvm::Triple::thumb:
1774 Asm = "udf #251";
1775 Constraints = "{r0}";
1776 break;
1777 case llvm::Triple::aarch64:
1778 Asm = "brk #0xF003";
1779 Constraints = "{w0}";
1780 }
1781 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1782 llvm::InlineAsm *IA =
1783 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1784 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1785 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1786 llvm::Attribute::NoReturn);
1787 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1788 CI->setAttributes(NoReturnAttr);
1789 return CI;
1790 }
1791 }
1792 llvm_unreachable("Incorrect MSVC intrinsic!");
1793 }
1795 namespace {
1796 // ARC cleanup for __builtin_os_log_format
1797 struct CallObjCArcUse final : EHScopeStack::Cleanup {
1798 CallObjCArcUse(llvm::Value *object) : object(object) {}
1799 llvm::Value *object;
1801 void Emit(CodeGenFunction &CGF, Flags flags) override {
1802 CGF.EmitARCIntrinsicUse(object);
1803 }
1804 };
1805 }
1807 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1808 BuiltinCheckKind Kind) {
1809 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1810 && "Unsupported builtin check kind");
1812 Value *ArgValue = EmitScalarExpr(E);
1813 if (!SanOpts.has(SanitizerKind::Builtin))
1814 return ArgValue;
1816 SanitizerScope SanScope(this);
1817 Value *Cond = Builder.CreateICmpNE(
1818 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1819 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1820 SanitizerHandler::InvalidBuiltin,
1821 {EmitCheckSourceLocation(E->getExprLoc()),
1822 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1823 std::nullopt);
1824 return ArgValue;
1825 }
1827 static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
1828 return CGF.Builder.CreateBinaryIntrinsic(
1829 Intrinsic::abs, ArgValue,
1830 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
1831 }
1833 static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
1834 bool SanitizeOverflow) {
1835 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
1837 // Try to eliminate overflow check.
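// A constant argument other than INT_MIN (e.g. __builtin_abs(-7)) can use the
// plain llvm.abs lowering; only the minimum signed value needs the checked path.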
1838 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
1839 if (!VCI->isMinSignedValue())
1840 return EmitAbs(CGF, ArgValue, true);
1841 }
1843 CodeGenFunction::SanitizerScope SanScope(&CGF);
1845 Constant *Zero = Constant::getNullValue(ArgValue->getType());
1846 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
1847 Intrinsic::ssub_with_overflow, Zero, ArgValue);
1848 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
1849 Value *NotOverflow = CGF.Builder.CreateNot(
1850 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
1852 // TODO: support -ftrapv-handler.
1853 if (SanitizeOverflow) {
1854 CGF.EmitCheck({{NotOverflow, SanitizerKind::SignedIntegerOverflow}},
1855 SanitizerHandler::NegateOverflow,
1856 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
1857 CGF.EmitCheckTypeDescriptor(E->getType())},
1858 {ArgValue});
1859 } else
1860 CGF.EmitTrapCheck(NotOverflow, SanitizerHandler::SubOverflow);
1862 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1863 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
1864 }
1866 /// Get the argument type for arguments to os_log_helper.
1867 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1868 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1869 return C.getCanonicalType(UnsignedTy);
1870 }
1872 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1873 const analyze_os_log::OSLogBufferLayout &Layout,
1874 CharUnits BufferAlignment) {
1875 ASTContext &Ctx = getContext();
1877 llvm::SmallString<64> Name;
1879 raw_svector_ostream OS(Name);
1880 OS << "__os_log_helper";
1881 OS << "_" << BufferAlignment.getQuantity();
1882 OS << "_" << int(Layout.getSummaryByte());
1883 OS << "_" << int(Layout.getNumArgsByte());
1884 for (const auto &Item : Layout.Items)
1885 OS << "_" << int(Item.getSizeByte()) << "_"
1886 << int(Item.getDescriptorByte());
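// For illustration (hypothetical values): a 16-byte-aligned buffer with summary
// byte 2, one argument, and a single 8-byte item with descriptor 0 would be
// named "__os_log_helper_16_2_1_8_0".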
1889 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1890 return F;
1892 llvm::SmallVector<QualType, 4> ArgTys;
1893 FunctionArgList Args;
1894 Args.push_back(ImplicitParamDecl::Create(
1895 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1896 ImplicitParamDecl::Other));
1897 ArgTys.emplace_back(Ctx.VoidPtrTy);
1899 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1900 char Size = Layout.Items[I].getSizeByte();
1901 if (!Size)
1902 continue;
1904 QualType ArgTy = getOSLogArgType(Ctx, Size);
1905 Args.push_back(ImplicitParamDecl::Create(
1906 Ctx, nullptr, SourceLocation(),
1907 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1908 ImplicitParamDecl::Other));
1909 ArgTys.emplace_back(ArgTy);
1912 QualType ReturnTy = Ctx.VoidTy;
1914 // The helper function has linkonce_odr linkage to enable the linker to merge
1915 // identical functions. To ensure the merging always happens, 'noinline' is
1916 // attached to the function when compiling with -Oz.
1917 const CGFunctionInfo &FI =
1918 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1919 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1920 llvm::Function *Fn = llvm::Function::Create(
1921 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1922 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1923 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
1924 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1925 Fn->setDoesNotThrow();
1927 // Attach 'noinline' at -Oz.
1928 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1929 Fn->addFnAttr(llvm::Attribute::NoInline);
1931 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1932 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
1934 // Create a scope with an artificial location for the body of this function.
1935 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1937 CharUnits Offset;
1938 Address BufAddr =
1939 Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
1940 BufferAlignment);
1941 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1942 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1943 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1944 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1946 unsigned I = 1;
1947 for (const auto &Item : Layout.Items) {
1948 Builder.CreateStore(
1949 Builder.getInt8(Item.getDescriptorByte()),
1950 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1951 Builder.CreateStore(
1952 Builder.getInt8(Item.getSizeByte()),
1953 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1955 CharUnits Size = Item.size();
1956 if (!Size.getQuantity())
1957 continue;
1959 Address Arg = GetAddrOfLocalVar(Args[I]);
1960 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1961 Addr = Addr.withElementType(Arg.getElementType());
1962 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1963 Offset += Size;
1964 ++I;
1967 FinishFunction();
1969 return Fn;
1970 }
1972 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1973 assert(E.getNumArgs() >= 2 &&
1974 "__builtin_os_log_format takes at least 2 arguments");
1975 ASTContext &Ctx = getContext();
1976 analyze_os_log::OSLogBufferLayout Layout;
1977 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1978 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1979 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1981 // Ignore argument 1, the format string. It is not currently used.
1982 CallArgList Args;
1983 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1985 for (const auto &Item : Layout.Items) {
1986 int Size = Item.getSizeByte();
1987 if (!Size)
1988 continue;
1990 llvm::Value *ArgVal;
1992 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1993 uint64_t Val = 0;
1994 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1995 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1996 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1997 } else if (const Expr *TheExpr = Item.getExpr()) {
1998 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2000 // If a temporary object that requires destruction after the full
2001 // expression is passed, push a lifetime-extended cleanup to extend its
2002 // lifetime to the end of the enclosing block scope.
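// For example, under ARC an argument like '[obj copy]' returns a temporary
// that would otherwise be released at the end of the full expression; below it
// is retained into a temporary slot so it stays alive until the end of the
// enclosing block scope.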
2003 auto LifetimeExtendObject = [&](const Expr *E) {
2004 E = E->IgnoreParenCasts();
2005 // Extend lifetimes of objects returned by function calls and message
2006 // sends.
2008 // FIXME: We should do this in other cases in which temporaries are
2009 // created including arguments of non-ARC types (e.g., C++
2010 // temporaries).
2011 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2012 return true;
2013 return false;
2016 if (TheExpr->getType()->isObjCRetainableType() &&
2017 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2018 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2019 "Only scalar can be a ObjC retainable type");
2020 if (!isa<Constant>(ArgVal)) {
2021 CleanupKind Cleanup = getARCCleanupKind();
2022 QualType Ty = TheExpr->getType();
2023 Address Alloca = Address::invalid();
2024 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2025 ArgVal = EmitARCRetain(Ty, ArgVal);
2026 Builder.CreateStore(ArgVal, Addr);
2027 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2028 CodeGenFunction::destroyARCStrongPrecise,
2029 Cleanup & EHCleanup);
2031 // Push a clang.arc.use call to ensure the ARC optimizer knows that the
2032 // argument has to be kept alive.
2033 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2034 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2037 } else {
2038 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2041 unsigned ArgValSize =
2042 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2043 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2044 ArgValSize);
2045 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2046 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2047 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2048 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2049 Args.add(RValue::get(ArgVal), ArgTy);
2052 const CGFunctionInfo &FI =
2053 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2054 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2055 Layout, BufAddr.getAlignment());
2056 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2057 return RValue::get(BufAddr.getPointer());
2058 }
2060 static bool isSpecialUnsignedMultiplySignedResult(
2061 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2062 WidthAndSignedness ResultInfo) {
2063 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2064 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2065 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2066 }
2068 static RValue EmitCheckedUnsignedMultiplySignedResult(
2069 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2070 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2071 const clang::Expr *ResultArg, QualType ResultQTy,
2072 WidthAndSignedness ResultInfo) {
2073 assert(isSpecialUnsignedMultiplySignedResult(
2074 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2075 "Cannot specialize this multiply");
2077 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2078 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2080 llvm::Value *HasOverflow;
2081 llvm::Value *Result = EmitOverflowIntrinsic(
2082 CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2084 // The intrinsic call will detect overflow when the value is > UINT_MAX.
2085 // However, since the original builtin had a signed result, we need to report
2086 // an overflow when the result is greater than INT_MAX.
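// For 32-bit operands, 0x80000000u * 1u does not overflow the unsigned
// multiply, but it exceeds INT_MAX, so HasOverflow is still set for the
// signed result.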
2087 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2088 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2090 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2091 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2093 bool isVolatile =
2094 ResultArg->getType()->getPointeeType().isVolatileQualified();
2095 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2096 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2097 isVolatile);
2098 return RValue::get(HasOverflow);
2099 }
2101 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
2102 static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2103 WidthAndSignedness Op1Info,
2104 WidthAndSignedness Op2Info,
2105 WidthAndSignedness ResultInfo) {
2106 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2107 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2108 Op1Info.Signed != Op2Info.Signed;
2109 }
2111 /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2112 /// the generic checked-binop irgen.
2113 static RValue
2114 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2115 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2116 WidthAndSignedness Op2Info,
2117 const clang::Expr *ResultArg, QualType ResultQTy,
2118 WidthAndSignedness ResultInfo) {
2119 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2120 Op2Info, ResultInfo) &&
2121 "Not a mixed-sign multipliction we can specialize");
2123 // Emit the signed and unsigned operands.
2124 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2125 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2126 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2127 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2128 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2129 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2131 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2132 if (SignedOpWidth < UnsignedOpWidth)
2133 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2134 if (UnsignedOpWidth < SignedOpWidth)
2135 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2137 llvm::Type *OpTy = Signed->getType();
2138 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2139 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2140 llvm::Type *ResTy = ResultPtr.getElementType();
2141 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2143 // Take the absolute value of the signed operand.
2144 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2145 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2146 llvm::Value *AbsSigned =
2147 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2149 // Perform a checked unsigned multiplication.
2150 llvm::Value *UnsignedOverflow;
2151 llvm::Value *UnsignedResult =
2152 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
2153 Unsigned, UnsignedOverflow);
2155 llvm::Value *Overflow, *Result;
2156 if (ResultInfo.Signed) {
2157 // Signed overflow occurs if the result is greater than INT_MAX or less
2158 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
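// For example, with a 32-bit int and unsigned operand, (-3) * 0x40000000u
// gives |Result| = 0xC0000000, which exceeds INT_MAX + 1 (the largest
// magnitude allowed for a negative product), so Overflow is set.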
2159 auto IntMax =
2160 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2161 llvm::Value *MaxResult =
2162 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2163 CGF.Builder.CreateZExt(IsNegative, OpTy));
2164 llvm::Value *SignedOverflow =
2165 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2166 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2168 // Prepare the signed result (possibly by negating it).
2169 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2170 llvm::Value *SignedResult =
2171 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2172 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2173 } else {
2174 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2175 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2176 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2177 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2178 if (ResultInfo.Width < OpWidth) {
2179 auto IntMax =
2180 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2181 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2182 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2183 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2186 // Negate the product if it would be negative in infinite precision.
2187 Result = CGF.Builder.CreateSelect(
2188 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2190 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2192 assert(Overflow && Result && "Missing overflow or result");
2194 bool isVolatile =
2195 ResultArg->getType()->getPointeeType().isVolatileQualified();
2196 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2197 isVolatile);
2198 return RValue::get(Overflow);
2199 }
2201 static bool
2202 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2203 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2204 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2205 Ty = Ctx.getBaseElementType(Arr);
2207 const auto *Record = Ty->getAsCXXRecordDecl();
2208 if (!Record)
2209 return false;
2211 // We've already checked this type, or are in the process of checking it.
2212 if (!Seen.insert(Record).second)
2213 return false;
2215 assert(Record->hasDefinition() &&
2216 "Incomplete types should already be diagnosed");
2218 if (Record->isDynamicClass())
2219 return true;
2221 for (FieldDecl *F : Record->fields()) {
2222 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2223 return true;
2224 }
2225 return false;
2226 }
2228 /// Determine if the specified type requires laundering by checking if it is a
2229 /// dynamic class type or contains a subobject which is a dynamic class type.
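/// For example, 'struct A { virtual ~A(); };' requires laundering under
/// -fstrict-vtable-pointers, as does a type containing an A subobject such as
/// 'struct B { A Elems[4]; };', whereas a plain aggregate of scalars does not.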
2230 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2231 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2232 return false;
2233 llvm::SmallPtrSet<const Decl *, 16> Seen;
2234 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2235 }
2237 RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2238 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2239 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2241 // The builtin's shift arg may have a different type than the source arg and
2242 // result, but the LLVM intrinsic uses the same type for all values.
2243 llvm::Type *Ty = Src->getType();
2244 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2246 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
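// e.g. __builtin_rotateleft32(x, n) becomes @llvm.fshl.i32(x, x, n).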
2247 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2248 Function *F = CGM.getIntrinsic(IID, Ty);
2249 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2250 }
2252 // Map math builtins for long-double to f128 version.
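// For example, when long double is IEEE quad, __builtin_sqrtl is rewritten to
// __builtin_sqrtf128 so the remaining lowering treats it as a binary128 call.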
2253 static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2254 switch (BuiltinID) {
2255 #define MUTATE_LDBL(func) \
2256 case Builtin::BI__builtin_##func##l: \
2257 return Builtin::BI__builtin_##func##f128;
2258 MUTATE_LDBL(sqrt)
2259 MUTATE_LDBL(cbrt)
2260 MUTATE_LDBL(fabs)
2261 MUTATE_LDBL(log)
2262 MUTATE_LDBL(log2)
2263 MUTATE_LDBL(log10)
2264 MUTATE_LDBL(log1p)
2265 MUTATE_LDBL(logb)
2266 MUTATE_LDBL(exp)
2267 MUTATE_LDBL(exp2)
2268 MUTATE_LDBL(expm1)
2269 MUTATE_LDBL(fdim)
2270 MUTATE_LDBL(hypot)
2271 MUTATE_LDBL(ilogb)
2272 MUTATE_LDBL(pow)
2273 MUTATE_LDBL(fmin)
2274 MUTATE_LDBL(fmax)
2275 MUTATE_LDBL(ceil)
2276 MUTATE_LDBL(trunc)
2277 MUTATE_LDBL(rint)
2278 MUTATE_LDBL(nearbyint)
2279 MUTATE_LDBL(round)
2280 MUTATE_LDBL(floor)
2281 MUTATE_LDBL(lround)
2282 MUTATE_LDBL(llround)
2283 MUTATE_LDBL(lrint)
2284 MUTATE_LDBL(llrint)
2285 MUTATE_LDBL(fmod)
2286 MUTATE_LDBL(modf)
2287 MUTATE_LDBL(nan)
2288 MUTATE_LDBL(nans)
2289 MUTATE_LDBL(inf)
2290 MUTATE_LDBL(fma)
2291 MUTATE_LDBL(sin)
2292 MUTATE_LDBL(cos)
2293 MUTATE_LDBL(tan)
2294 MUTATE_LDBL(sinh)
2295 MUTATE_LDBL(cosh)
2296 MUTATE_LDBL(tanh)
2297 MUTATE_LDBL(asin)
2298 MUTATE_LDBL(acos)
2299 MUTATE_LDBL(atan)
2300 MUTATE_LDBL(asinh)
2301 MUTATE_LDBL(acosh)
2302 MUTATE_LDBL(atanh)
2303 MUTATE_LDBL(atan2)
2304 MUTATE_LDBL(erf)
2305 MUTATE_LDBL(erfc)
2306 MUTATE_LDBL(ldexp)
2307 MUTATE_LDBL(frexp)
2308 MUTATE_LDBL(huge_val)
2309 MUTATE_LDBL(copysign)
2310 MUTATE_LDBL(nextafter)
2311 MUTATE_LDBL(nexttoward)
2312 MUTATE_LDBL(remainder)
2313 MUTATE_LDBL(remquo)
2314 MUTATE_LDBL(scalbln)
2315 MUTATE_LDBL(scalbn)
2316 MUTATE_LDBL(tgamma)
2317 MUTATE_LDBL(lgamma)
2318 #undef MUTATE_LDBL
2319 default:
2320 return BuiltinID;
2321 }
2322 }
2324 static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2325 Value *V) {
2326 if (CGF.Builder.getIsFPConstrained() &&
2327 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2328 if (Value *Result =
2329 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2330 return Result;
2331 }
2332 return nullptr;
2333 }
2335 static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2336 const FunctionDecl *FD) {
2337 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2338 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2339 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2341 SmallVector<Value *, 16> Args;
2342 for (auto &&FormalTy : FnTy->params())
2343 Args.push_back(llvm::PoisonValue::get(FormalTy));
2345 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2346 }
2348 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2349 const CallExpr *E,
2350 ReturnValueSlot ReturnValue) {
2351 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2352 // See if we can constant fold this builtin. If so, don't emit it at all.
2353 // TODO: Extend this handling to all builtin calls that we can constant-fold.
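// For example, __builtin_abs(-5) or __builtin_fabs(-2.0) folds to a constant
// here and no call is emitted.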
2354 Expr::EvalResult Result;
2355 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2356 !Result.hasSideEffects()) {
2357 if (Result.Val.isInt())
2358 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2359 Result.Val.getInt()));
2360 if (Result.Val.isFloat())
2361 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2362 Result.Val.getFloat()));
2365 // If the current long-double semantics is IEEE 128-bit, replace long-double
2366 // math builtins with their f128 equivalents.
2367 // TODO: This mutation should also be applied to targets other than PPC,
2368 // once the backend supports IEEE 128-bit style libcalls.
2369 if (getTarget().getTriple().isPPC64() &&
2370 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2371 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2373 // If the builtin has been declared explicitly with an assembler label,
2374 // disable the specialized emitting below. Ideally we should communicate the
2375 // rename in IR, or at least avoid generating the intrinsic calls that are
2376 // likely to get lowered to the renamed library functions.
2377 const unsigned BuiltinIDIfNoAsmLabel =
2378 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2380 std::optional<bool> ErrnoOverriden;
2381 // ErrnoOverriden is true if math-errno is overridden via the
2382 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2383 // which implies math-errno.
2384 if (E->hasStoredFPFeatures()) {
2385 FPOptionsOverride OP = E->getFPFeatures();
2386 if (OP.hasMathErrnoOverride())
2387 ErrnoOverriden = OP.getMathErrnoOverride();
2389 // True if '__attribute__((optnone))' is used. This attribute overrides
2390 // fast-math, which implies math-errno.
2391 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2393 // True if we are compiling with optimization and errno has been disabled
2394 // using the '#pragma float_control(precise, off)', and
2395 // '__attribute__((optnone))' hasn't been seen.
2396 bool ErrnoOverridenToFalseWithOpt =
2397 ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone &&
2398 CGM.getCodeGenOpts().OptimizationLevel != 0;
2400 // There are LLVM math intrinsics/instructions corresponding to math library
2401 // functions, except that the LLVM op will never set errno while the math library
2402 // might. Also, math builtins have the same semantics as their math library
2403 // twins. Thus, we can transform math library and builtin calls to their
2404 // LLVM counterparts if the call is marked 'const' (known to never set errno).
2405 // In case FP exceptions are enabled, the experimental versions of the
2406 // intrinsics model those.
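// For example, when errno handling is not needed, sqrt(x) is emitted as a call
// to @llvm.sqrt.f64 (or to llvm.experimental.constrained.sqrt under strict FP)
// rather than as a libcall to sqrt.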
2407 bool ConstAlways =
2408 getContext().BuiltinInfo.isConst(BuiltinID);
2410 // There's a special case with the fma builtins where they are always const
2411 // if the target environment is GNU or the target OS is Windows and we're
2412 // targeting the MSVCRT.dll environment.
2413 // FIXME: This list can become outdated. Need to find a way to get it some
2414 // other way.
2415 switch (BuiltinID) {
2416 case Builtin::BI__builtin_fma:
2417 case Builtin::BI__builtin_fmaf:
2418 case Builtin::BI__builtin_fmal:
2419 case Builtin::BIfma:
2420 case Builtin::BIfmaf:
2421 case Builtin::BIfmal: {
2422 auto &Trip = CGM.getTriple();
2423 if (Trip.isGNUEnvironment() || Trip.isOSMSVCRT())
2424 ConstAlways = true;
2425 break;
2427 default:
2428 break;
2431 bool ConstWithoutErrnoAndExceptions =
2432 getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
2433 bool ConstWithoutExceptions =
2434 getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID);
2436 // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is
2437 // disabled.
2438 // Math intrinsics are generated only when math-errno is disabled. Any pragmas
2439 // or attributes that affect math-errno should prevent or allow math
2440 // intrinsics to be generated. Intrinsics are generated:
2441 // 1- In fast math mode, unless math-errno is overridden
2442 //    via '#pragma float_control(precise, on)', or via an
2443 //    '__attribute__((optnone))'.
2444 // 2- If math-errno was enabled on the command line but overridden
2445 //    to false via '#pragma float_control(precise, off)' and
2446 //    '__attribute__((optnone))' hasn't been used.
2447 // 3- If we are compiling with optimization and errno has been disabled
2448 //    via '#pragma float_control(precise, off)', and
2449 //    '__attribute__((optnone))' hasn't been used (see the example below).
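// For instance, at -O2 with -fmath-errno, code like
//   #pragma float_control(precise, off)
//   double g(double x) { return sin(x); }
// still gets the llvm.sin intrinsic for the sin() call, because the pragma
// locally turns math-errno back off (a rough illustration of case 3).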
2451 bool ConstWithoutErrnoOrExceptions =
2452 ConstWithoutErrnoAndExceptions || ConstWithoutExceptions;
2453 bool GenerateIntrinsics =
2454 (ConstAlways && !OptNone) ||
2455 (!getLangOpts().MathErrno &&
2456 !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
2457 if (!GenerateIntrinsics) {
2458 GenerateIntrinsics =
2459 ConstWithoutErrnoOrExceptions && !ConstWithoutErrnoAndExceptions;
2460 if (!GenerateIntrinsics)
2461 GenerateIntrinsics =
2462 ConstWithoutErrnoOrExceptions &&
2463 (!getLangOpts().MathErrno &&
2464 !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
2465 if (!GenerateIntrinsics)
2466 GenerateIntrinsics =
2467 ConstWithoutErrnoOrExceptions && ErrnoOverridenToFalseWithOpt;
2468 }
2469 if (GenerateIntrinsics) {
2470 switch (BuiltinIDIfNoAsmLabel) {
2471 case Builtin::BIceil:
2472 case Builtin::BIceilf:
2473 case Builtin::BIceill:
2474 case Builtin::BI__builtin_ceil:
2475 case Builtin::BI__builtin_ceilf:
2476 case Builtin::BI__builtin_ceilf16:
2477 case Builtin::BI__builtin_ceill:
2478 case Builtin::BI__builtin_ceilf128:
2479 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2480 Intrinsic::ceil,
2481 Intrinsic::experimental_constrained_ceil));
2483 case Builtin::BIcopysign:
2484 case Builtin::BIcopysignf:
2485 case Builtin::BIcopysignl:
2486 case Builtin::BI__builtin_copysign:
2487 case Builtin::BI__builtin_copysignf:
2488 case Builtin::BI__builtin_copysignf16:
2489 case Builtin::BI__builtin_copysignl:
2490 case Builtin::BI__builtin_copysignf128:
2491 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2493 case Builtin::BIcos:
2494 case Builtin::BIcosf:
2495 case Builtin::BIcosl:
2496 case Builtin::BI__builtin_cos:
2497 case Builtin::BI__builtin_cosf:
2498 case Builtin::BI__builtin_cosf16:
2499 case Builtin::BI__builtin_cosl:
2500 case Builtin::BI__builtin_cosf128:
2501 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2502 Intrinsic::cos,
2503 Intrinsic::experimental_constrained_cos));
2505 case Builtin::BIexp:
2506 case Builtin::BIexpf:
2507 case Builtin::BIexpl:
2508 case Builtin::BI__builtin_exp:
2509 case Builtin::BI__builtin_expf:
2510 case Builtin::BI__builtin_expf16:
2511 case Builtin::BI__builtin_expl:
2512 case Builtin::BI__builtin_expf128:
2513 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2514 Intrinsic::exp,
2515 Intrinsic::experimental_constrained_exp));
2517 case Builtin::BIexp2:
2518 case Builtin::BIexp2f:
2519 case Builtin::BIexp2l:
2520 case Builtin::BI__builtin_exp2:
2521 case Builtin::BI__builtin_exp2f:
2522 case Builtin::BI__builtin_exp2f16:
2523 case Builtin::BI__builtin_exp2l:
2524 case Builtin::BI__builtin_exp2f128:
2525 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2526 Intrinsic::exp2,
2527 Intrinsic::experimental_constrained_exp2));
2528 case Builtin::BI__builtin_exp10:
2529 case Builtin::BI__builtin_exp10f:
2530 case Builtin::BI__builtin_exp10f16:
2531 case Builtin::BI__builtin_exp10l:
2532 case Builtin::BI__builtin_exp10f128: {
2533 // TODO: strictfp support
2534 if (Builder.getIsFPConstrained())
2535 break;
2536 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp10));
2538 case Builtin::BIfabs:
2539 case Builtin::BIfabsf:
2540 case Builtin::BIfabsl:
2541 case Builtin::BI__builtin_fabs:
2542 case Builtin::BI__builtin_fabsf:
2543 case Builtin::BI__builtin_fabsf16:
2544 case Builtin::BI__builtin_fabsl:
2545 case Builtin::BI__builtin_fabsf128:
2546 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2548 case Builtin::BIfloor:
2549 case Builtin::BIfloorf:
2550 case Builtin::BIfloorl:
2551 case Builtin::BI__builtin_floor:
2552 case Builtin::BI__builtin_floorf:
2553 case Builtin::BI__builtin_floorf16:
2554 case Builtin::BI__builtin_floorl:
2555 case Builtin::BI__builtin_floorf128:
2556 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2557 Intrinsic::floor,
2558 Intrinsic::experimental_constrained_floor));
2560 case Builtin::BIfma:
2561 case Builtin::BIfmaf:
2562 case Builtin::BIfmal:
2563 case Builtin::BI__builtin_fma:
2564 case Builtin::BI__builtin_fmaf:
2565 case Builtin::BI__builtin_fmaf16:
2566 case Builtin::BI__builtin_fmal:
2567 case Builtin::BI__builtin_fmaf128:
2568 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2569 Intrinsic::fma,
2570 Intrinsic::experimental_constrained_fma));
2572 case Builtin::BIfmax:
2573 case Builtin::BIfmaxf:
2574 case Builtin::BIfmaxl:
2575 case Builtin::BI__builtin_fmax:
2576 case Builtin::BI__builtin_fmaxf:
2577 case Builtin::BI__builtin_fmaxf16:
2578 case Builtin::BI__builtin_fmaxl:
2579 case Builtin::BI__builtin_fmaxf128:
2580 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2581 Intrinsic::maxnum,
2582 Intrinsic::experimental_constrained_maxnum));
2584 case Builtin::BIfmin:
2585 case Builtin::BIfminf:
2586 case Builtin::BIfminl:
2587 case Builtin::BI__builtin_fmin:
2588 case Builtin::BI__builtin_fminf:
2589 case Builtin::BI__builtin_fminf16:
2590 case Builtin::BI__builtin_fminl:
2591 case Builtin::BI__builtin_fminf128:
2592 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2593 Intrinsic::minnum,
2594 Intrinsic::experimental_constrained_minnum));
2596 // fmod() is a special case. It maps to the frem instruction rather than an
2597 // LLVM intrinsic.
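// e.g. fmod(x, y) is emitted as 'frem double %x, %y'.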
2598 case Builtin::BIfmod:
2599 case Builtin::BIfmodf:
2600 case Builtin::BIfmodl:
2601 case Builtin::BI__builtin_fmod:
2602 case Builtin::BI__builtin_fmodf:
2603 case Builtin::BI__builtin_fmodf16:
2604 case Builtin::BI__builtin_fmodl:
2605 case Builtin::BI__builtin_fmodf128: {
2606 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2607 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2608 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2609 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2612 case Builtin::BIlog:
2613 case Builtin::BIlogf:
2614 case Builtin::BIlogl:
2615 case Builtin::BI__builtin_log:
2616 case Builtin::BI__builtin_logf:
2617 case Builtin::BI__builtin_logf16:
2618 case Builtin::BI__builtin_logl:
2619 case Builtin::BI__builtin_logf128:
2620 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2621 Intrinsic::log,
2622 Intrinsic::experimental_constrained_log));
2624 case Builtin::BIlog10:
2625 case Builtin::BIlog10f:
2626 case Builtin::BIlog10l:
2627 case Builtin::BI__builtin_log10:
2628 case Builtin::BI__builtin_log10f:
2629 case Builtin::BI__builtin_log10f16:
2630 case Builtin::BI__builtin_log10l:
2631 case Builtin::BI__builtin_log10f128:
2632 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2633 Intrinsic::log10,
2634 Intrinsic::experimental_constrained_log10));
2636 case Builtin::BIlog2:
2637 case Builtin::BIlog2f:
2638 case Builtin::BIlog2l:
2639 case Builtin::BI__builtin_log2:
2640 case Builtin::BI__builtin_log2f:
2641 case Builtin::BI__builtin_log2f16:
2642 case Builtin::BI__builtin_log2l:
2643 case Builtin::BI__builtin_log2f128:
2644 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2645 Intrinsic::log2,
2646 Intrinsic::experimental_constrained_log2));
2648 case Builtin::BInearbyint:
2649 case Builtin::BInearbyintf:
2650 case Builtin::BInearbyintl:
2651 case Builtin::BI__builtin_nearbyint:
2652 case Builtin::BI__builtin_nearbyintf:
2653 case Builtin::BI__builtin_nearbyintl:
2654 case Builtin::BI__builtin_nearbyintf128:
2655 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2656 Intrinsic::nearbyint,
2657 Intrinsic::experimental_constrained_nearbyint));
2659 case Builtin::BIpow:
2660 case Builtin::BIpowf:
2661 case Builtin::BIpowl:
2662 case Builtin::BI__builtin_pow:
2663 case Builtin::BI__builtin_powf:
2664 case Builtin::BI__builtin_powf16:
2665 case Builtin::BI__builtin_powl:
2666 case Builtin::BI__builtin_powf128:
2667 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2668 Intrinsic::pow,
2669 Intrinsic::experimental_constrained_pow));
2671 case Builtin::BIrint:
2672 case Builtin::BIrintf:
2673 case Builtin::BIrintl:
2674 case Builtin::BI__builtin_rint:
2675 case Builtin::BI__builtin_rintf:
2676 case Builtin::BI__builtin_rintf16:
2677 case Builtin::BI__builtin_rintl:
2678 case Builtin::BI__builtin_rintf128:
2679 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2680 Intrinsic::rint,
2681 Intrinsic::experimental_constrained_rint));
2683 case Builtin::BIround:
2684 case Builtin::BIroundf:
2685 case Builtin::BIroundl:
2686 case Builtin::BI__builtin_round:
2687 case Builtin::BI__builtin_roundf:
2688 case Builtin::BI__builtin_roundf16:
2689 case Builtin::BI__builtin_roundl:
2690 case Builtin::BI__builtin_roundf128:
2691 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2692 Intrinsic::round,
2693 Intrinsic::experimental_constrained_round));
2695 case Builtin::BIroundeven:
2696 case Builtin::BIroundevenf:
2697 case Builtin::BIroundevenl:
2698 case Builtin::BI__builtin_roundeven:
2699 case Builtin::BI__builtin_roundevenf:
2700 case Builtin::BI__builtin_roundevenf16:
2701 case Builtin::BI__builtin_roundevenl:
2702 case Builtin::BI__builtin_roundevenf128:
2703 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2704 Intrinsic::roundeven,
2705 Intrinsic::experimental_constrained_roundeven));
2707 case Builtin::BIsin:
2708 case Builtin::BIsinf:
2709 case Builtin::BIsinl:
2710 case Builtin::BI__builtin_sin:
2711 case Builtin::BI__builtin_sinf:
2712 case Builtin::BI__builtin_sinf16:
2713 case Builtin::BI__builtin_sinl:
2714 case Builtin::BI__builtin_sinf128:
2715 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2716 Intrinsic::sin,
2717 Intrinsic::experimental_constrained_sin));
2719 case Builtin::BIsqrt:
2720 case Builtin::BIsqrtf:
2721 case Builtin::BIsqrtl:
2722 case Builtin::BI__builtin_sqrt:
2723 case Builtin::BI__builtin_sqrtf:
2724 case Builtin::BI__builtin_sqrtf16:
2725 case Builtin::BI__builtin_sqrtl:
2726 case Builtin::BI__builtin_sqrtf128:
2727 case Builtin::BI__builtin_elementwise_sqrt: {
2728 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
2729 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
2730 SetSqrtFPAccuracy(Call);
2731 return RValue::get(Call);
2733 case Builtin::BItrunc:
2734 case Builtin::BItruncf:
2735 case Builtin::BItruncl:
2736 case Builtin::BI__builtin_trunc:
2737 case Builtin::BI__builtin_truncf:
2738 case Builtin::BI__builtin_truncf16:
2739 case Builtin::BI__builtin_truncl:
2740 case Builtin::BI__builtin_truncf128:
2741 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2742 Intrinsic::trunc,
2743 Intrinsic::experimental_constrained_trunc));
2745 case Builtin::BIlround:
2746 case Builtin::BIlroundf:
2747 case Builtin::BIlroundl:
2748 case Builtin::BI__builtin_lround:
2749 case Builtin::BI__builtin_lroundf:
2750 case Builtin::BI__builtin_lroundl:
2751 case Builtin::BI__builtin_lroundf128:
2752 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2753 *this, E, Intrinsic::lround,
2754 Intrinsic::experimental_constrained_lround));
2756 case Builtin::BIllround:
2757 case Builtin::BIllroundf:
2758 case Builtin::BIllroundl:
2759 case Builtin::BI__builtin_llround:
2760 case Builtin::BI__builtin_llroundf:
2761 case Builtin::BI__builtin_llroundl:
2762 case Builtin::BI__builtin_llroundf128:
2763 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2764 *this, E, Intrinsic::llround,
2765 Intrinsic::experimental_constrained_llround));
2767 case Builtin::BIlrint:
2768 case Builtin::BIlrintf:
2769 case Builtin::BIlrintl:
2770 case Builtin::BI__builtin_lrint:
2771 case Builtin::BI__builtin_lrintf:
2772 case Builtin::BI__builtin_lrintl:
2773 case Builtin::BI__builtin_lrintf128:
2774 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2775 *this, E, Intrinsic::lrint,
2776 Intrinsic::experimental_constrained_lrint));
2778 case Builtin::BIllrint:
2779 case Builtin::BIllrintf:
2780 case Builtin::BIllrintl:
2781 case Builtin::BI__builtin_llrint:
2782 case Builtin::BI__builtin_llrintf:
2783 case Builtin::BI__builtin_llrintl:
2784 case Builtin::BI__builtin_llrintf128:
2785 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2786 *this, E, Intrinsic::llrint,
2787 Intrinsic::experimental_constrained_llrint));
2788 case Builtin::BI__builtin_ldexp:
2789 case Builtin::BI__builtin_ldexpf:
2790 case Builtin::BI__builtin_ldexpl:
2791 case Builtin::BI__builtin_ldexpf16:
2792 case Builtin::BI__builtin_ldexpf128: {
2793 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
2794 *this, E, Intrinsic::ldexp,
2795 Intrinsic::experimental_constrained_ldexp));
2797 default:
2798 break;
2799 }
2800 }
2802 // Check NonnullAttribute/NullabilityArg and Alignment.
2803 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
2804 unsigned ParmNum) {
2805 Value *Val = A.getPointer();
2806 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
2807 ParmNum);
2809 if (SanOpts.has(SanitizerKind::Alignment) && ClSanitizeAlignmentBuiltin) {
2810 SanitizerSet SkippedChecks;
2811 SkippedChecks.set(SanitizerKind::All);
2812 SkippedChecks.clear(SanitizerKind::Alignment);
2813 SourceLocation Loc = Arg->getExprLoc();
2814 // Strip an implicit cast.
2815 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
2816 if (CE->getCastKind() == CK_BitCast)
2817 Arg = CE->getSubExpr();
2818 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
2819 SkippedChecks);
2820 }
2821 };
2823 switch (BuiltinIDIfNoAsmLabel) {
2824 default: break;
2825 case Builtin::BI__builtin___CFStringMakeConstantString:
2826 case Builtin::BI__builtin___NSStringMakeConstantString:
2827 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
2828 case Builtin::BI__builtin_stdarg_start:
2829 case Builtin::BI__builtin_va_start:
2830 case Builtin::BI__va_start:
2831 case Builtin::BI__builtin_va_end:
2832 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2833 ? EmitScalarExpr(E->getArg(0))
2834 : EmitVAListRef(E->getArg(0)).getPointer(),
2835 BuiltinID != Builtin::BI__builtin_va_end);
2836 return RValue::get(nullptr);
2837 case Builtin::BI__builtin_va_copy: {
2838 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2839 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2840 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), {DstPtr, SrcPtr});
2841 return RValue::get(nullptr);
2843 case Builtin::BIabs:
2844 case Builtin::BIlabs:
2845 case Builtin::BIllabs:
2846 case Builtin::BI__builtin_abs:
2847 case Builtin::BI__builtin_labs:
2848 case Builtin::BI__builtin_llabs: {
2849 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
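    // Choose the lowering from the signed-overflow model: a plain wrapping abs
    // for -fwrapv, an abs that may assume INT_MIN never occurs when overflow is
    // undefined and unsanitized, and an overflow-checked form for -ftrapv or
    // the sanitizer.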
2851 Value *Result;
2852 switch (getLangOpts().getSignedOverflowBehavior()) {
2853 case LangOptions::SOB_Defined:
2854 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
2855 break;
2856 case LangOptions::SOB_Undefined:
2857 if (!SanitizeOverflow) {
2858 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
2859 break;
2861 [[fallthrough]];
2862 case LangOptions::SOB_Trapping:
2863 // TODO: Somehow handle the corner case when the address of abs is taken.
2864 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
2865 break;
2867 return RValue::get(Result);
2869 case Builtin::BI__builtin_complex: {
2870 Value *Real = EmitScalarExpr(E->getArg(0));
2871 Value *Imag = EmitScalarExpr(E->getArg(1));
2872 return RValue::getComplex({Real, Imag});
2874 case Builtin::BI__builtin_conj:
2875 case Builtin::BI__builtin_conjf:
2876 case Builtin::BI__builtin_conjl:
2877 case Builtin::BIconj:
2878 case Builtin::BIconjf:
2879 case Builtin::BIconjl: {
2880 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2881 Value *Real = ComplexVal.first;
2882 Value *Imag = ComplexVal.second;
2883 Imag = Builder.CreateFNeg(Imag, "neg");
2884 return RValue::getComplex(std::make_pair(Real, Imag));
2886 case Builtin::BI__builtin_creal:
2887 case Builtin::BI__builtin_crealf:
2888 case Builtin::BI__builtin_creall:
2889 case Builtin::BIcreal:
2890 case Builtin::BIcrealf:
2891 case Builtin::BIcreall: {
2892 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2893 return RValue::get(ComplexVal.first);
2896 case Builtin::BI__builtin_preserve_access_index: {
2897 // Only enable the preserved access index region when debug info is
2898 // available, as debug info is needed to preserve the user-level
2899 // access pattern.
2900 if (!getDebugInfo()) {
2901 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2902 return RValue::get(EmitScalarExpr(E->getArg(0)));
2905 // Nested builtin_preserve_access_index() not supported
2906 if (IsInPreservedAIRegion) {
2907 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2908 return RValue::get(EmitScalarExpr(E->getArg(0)));
2911 IsInPreservedAIRegion = true;
2912 Value *Res = EmitScalarExpr(E->getArg(0));
2913 IsInPreservedAIRegion = false;
2914 return RValue::get(Res);
2917 case Builtin::BI__builtin_cimag:
2918 case Builtin::BI__builtin_cimagf:
2919 case Builtin::BI__builtin_cimagl:
2920 case Builtin::BIcimag:
2921 case Builtin::BIcimagf:
2922 case Builtin::BIcimagl: {
2923 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2924 return RValue::get(ComplexVal.second);
2927 case Builtin::BI__builtin_clrsb:
2928 case Builtin::BI__builtin_clrsbl:
2929 case Builtin::BI__builtin_clrsbll: {
2930 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2931 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2933 llvm::Type *ArgType = ArgValue->getType();
2934 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2936 llvm::Type *ResultType = ConvertType(E->getType());
2937 Value *Zero = llvm::Constant::getNullValue(ArgType);
2938 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2939 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2940 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2941 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2942 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2943 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2944 "cast");
2945 return RValue::get(Result);
2947 case Builtin::BI__builtin_ctzs:
2948 case Builtin::BI__builtin_ctz:
2949 case Builtin::BI__builtin_ctzl:
2950 case Builtin::BI__builtin_ctzll: {
2951 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2953 llvm::Type *ArgType = ArgValue->getType();
2954 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2956 llvm::Type *ResultType = ConvertType(E->getType());
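    // On targets where ctz/clz of zero is undefined, mark a zero input as
    // producing poison so the backend can use the native instruction directly.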
2957 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2958 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2959 if (Result->getType() != ResultType)
2960 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2961 "cast");
2962 return RValue::get(Result);
2964 case Builtin::BI__builtin_clzs:
2965 case Builtin::BI__builtin_clz:
2966 case Builtin::BI__builtin_clzl:
2967 case Builtin::BI__builtin_clzll: {
2968 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2970 llvm::Type *ArgType = ArgValue->getType();
2971 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2973 llvm::Type *ResultType = ConvertType(E->getType());
2974 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2975 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2976 if (Result->getType() != ResultType)
2977 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2978 "cast");
2979 return RValue::get(Result);
2981 case Builtin::BI__builtin_ffs:
2982 case Builtin::BI__builtin_ffsl:
2983 case Builtin::BI__builtin_ffsll: {
2984 // ffs(x) -> x ? cttz(x) + 1 : 0
2985 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2987 llvm::Type *ArgType = ArgValue->getType();
2988 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2990 llvm::Type *ResultType = ConvertType(E->getType());
2991 Value *Tmp =
2992 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2993 llvm::ConstantInt::get(ArgType, 1));
2994 Value *Zero = llvm::Constant::getNullValue(ArgType);
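    // cttz was emitted with the zero-is-poison flag, so select 0 explicitly
    // when the input is zero rather than using its result.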
2995 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2996 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2997 if (Result->getType() != ResultType)
2998 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2999 "cast");
3000 return RValue::get(Result);
3002 case Builtin::BI__builtin_parity:
3003 case Builtin::BI__builtin_parityl:
3004 case Builtin::BI__builtin_parityll: {
3005 // parity(x) -> ctpop(x) & 1
3006 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3008 llvm::Type *ArgType = ArgValue->getType();
3009 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3011 llvm::Type *ResultType = ConvertType(E->getType());
3012 Value *Tmp = Builder.CreateCall(F, ArgValue);
3013 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3014 if (Result->getType() != ResultType)
3015 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3016 "cast");
3017 return RValue::get(Result);
3019 case Builtin::BI__lzcnt16:
3020 case Builtin::BI__lzcnt:
3021 case Builtin::BI__lzcnt64: {
3022 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3024 llvm::Type *ArgType = ArgValue->getType();
3025 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3027 llvm::Type *ResultType = ConvertType(E->getType());
3028 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3029 if (Result->getType() != ResultType)
3030 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3031 "cast");
3032 return RValue::get(Result);
3034 case Builtin::BI__popcnt16:
3035 case Builtin::BI__popcnt:
3036 case Builtin::BI__popcnt64:
3037 case Builtin::BI__builtin_popcount:
3038 case Builtin::BI__builtin_popcountl:
3039 case Builtin::BI__builtin_popcountll: {
3040 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3042 llvm::Type *ArgType = ArgValue->getType();
3043 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3045 llvm::Type *ResultType = ConvertType(E->getType());
3046 Value *Result = Builder.CreateCall(F, ArgValue);
3047 if (Result->getType() != ResultType)
3048 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3049 "cast");
3050 return RValue::get(Result);
3052 case Builtin::BI__builtin_unpredictable: {
3053 // Always return the argument of __builtin_unpredictable. LLVM does not
3054 // handle this builtin. Metadata for this builtin should be added directly
3055 // to instructions such as branches or switches that use it.
3056 return RValue::get(EmitScalarExpr(E->getArg(0)));
3058 case Builtin::BI__builtin_expect: {
3059 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3060 llvm::Type *ArgType = ArgValue->getType();
3062 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3063 // Don't generate llvm.expect on -O0 as the backend won't use it for
3064 // anything.
3065 // Note, we still IRGen ExpectedValue because it could have side-effects.
3066 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3067 return RValue::get(ArgValue);
3069 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3070 Value *Result =
3071 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3072 return RValue::get(Result);
3074 case Builtin::BI__builtin_expect_with_probability: {
3075 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3076 llvm::Type *ArgType = ArgValue->getType();
3078 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3079 llvm::APFloat Probability(0.0);
3080 const Expr *ProbArg = E->getArg(2);
3081 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3082 assert(EvalSucceed && "probability should be able to evaluate as float");
3083 (void)EvalSucceed;
3084 bool LoseInfo = false;
3085 Probability.convert(llvm::APFloat::IEEEdouble(),
3086 llvm::RoundingMode::Dynamic, &LoseInfo);
3087 llvm::Type *Ty = ConvertType(ProbArg->getType());
3088 Constant *Confidence = ConstantFP::get(Ty, Probability);
3089 // Don't generate llvm.expect.with.probability on -O0 as the backend
3090 // won't use it for anything.
3091 // Note, we still IRGen ExpectedValue because it could have side-effects.
3092 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3093 return RValue::get(ArgValue);
3095 Function *FnExpect =
3096 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3097 Value *Result = Builder.CreateCall(
3098 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3099 return RValue::get(Result);
3101 case Builtin::BI__builtin_assume_aligned: {
3102 const Expr *Ptr = E->getArg(0);
3103 Value *PtrValue = EmitScalarExpr(Ptr);
3104 Value *OffsetValue =
3105 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3107 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3108 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
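    // Clamp oversized constant alignments to the largest value the IR can
    // represent.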
3109 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3110 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
3111 llvm::Value::MaximumAlignment);
3113 emitAlignmentAssumption(PtrValue, Ptr,
3114 /*The expr loc is sufficient.*/ SourceLocation(),
3115 AlignmentCI, OffsetValue);
3116 return RValue::get(PtrValue);
3118 case Builtin::BI__assume:
3119 case Builtin::BI__builtin_assume: {
3120 if (E->getArg(0)->HasSideEffects(getContext()))
3121 return RValue::get(nullptr);
3123 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3124 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3125 Builder.CreateCall(FnAssume, ArgValue);
3126 return RValue::get(nullptr);
3128 case Builtin::BI__builtin_assume_separate_storage: {
3129 const Expr *Arg0 = E->getArg(0);
3130 const Expr *Arg1 = E->getArg(1);
3132 Value *Value0 = EmitScalarExpr(Arg0);
3133 Value *Value1 = EmitScalarExpr(Arg1);
3135 Value *Values[] = {Value0, Value1};
3136 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3137 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3138 return RValue::get(nullptr);
3140 case Builtin::BI__arithmetic_fence: {
3141 // Create the builtin call if FastMath is selected and the target
3142 // supports the builtin; otherwise just return the argument.
3143 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3144 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3145 bool isArithmeticFenceEnabled =
3146 FMF.allowReassoc() &&
3147 getContext().getTargetInfo().checkArithmeticFenceSupported();
3148 QualType ArgType = E->getArg(0)->getType();
3149 if (ArgType->isComplexType()) {
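      // Complex values are fenced component-wise when the fence is enabled;
      // otherwise they pass through unchanged.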
3150 if (isArithmeticFenceEnabled) {
3151 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3152 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3153 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3154 ConvertType(ElementType));
3155 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3156 ConvertType(ElementType));
3157 return RValue::getComplex(std::make_pair(Real, Imag));
3159 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3160 Value *Real = ComplexVal.first;
3161 Value *Imag = ComplexVal.second;
3162 return RValue::getComplex(std::make_pair(Real, Imag));
3164 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3165 if (isArithmeticFenceEnabled)
3166 return RValue::get(
3167 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3168 return RValue::get(ArgValue);
3170 case Builtin::BI__builtin_bswap16:
3171 case Builtin::BI__builtin_bswap32:
3172 case Builtin::BI__builtin_bswap64:
3173 case Builtin::BI_byteswap_ushort:
3174 case Builtin::BI_byteswap_ulong:
3175 case Builtin::BI_byteswap_uint64: {
3176 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
3178 case Builtin::BI__builtin_bitreverse8:
3179 case Builtin::BI__builtin_bitreverse16:
3180 case Builtin::BI__builtin_bitreverse32:
3181 case Builtin::BI__builtin_bitreverse64: {
3182 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
3184 case Builtin::BI__builtin_rotateleft8:
3185 case Builtin::BI__builtin_rotateleft16:
3186 case Builtin::BI__builtin_rotateleft32:
3187 case Builtin::BI__builtin_rotateleft64:
3188 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3189 case Builtin::BI_rotl16:
3190 case Builtin::BI_rotl:
3191 case Builtin::BI_lrotl:
3192 case Builtin::BI_rotl64:
3193 return emitRotate(E, false);
3195 case Builtin::BI__builtin_rotateright8:
3196 case Builtin::BI__builtin_rotateright16:
3197 case Builtin::BI__builtin_rotateright32:
3198 case Builtin::BI__builtin_rotateright64:
3199 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3200 case Builtin::BI_rotr16:
3201 case Builtin::BI_rotr:
3202 case Builtin::BI_lrotr:
3203 case Builtin::BI_rotr64:
3204 return emitRotate(E, true);
3206 case Builtin::BI__builtin_constant_p: {
3207 llvm::Type *ResultType = ConvertType(E->getType());
3209 const Expr *Arg = E->getArg(0);
3210 QualType ArgType = Arg->getType();
3211 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3212 // and likely a mistake.
3213 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3214 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3215 // Per the GCC documentation, only numeric constants are recognized after
3216 // inlining.
3217 return RValue::get(ConstantInt::get(ResultType, 0));
3219 if (Arg->HasSideEffects(getContext()))
3220 // The argument is unevaluated, so be conservative if it might have
3221 // side-effects.
3222 return RValue::get(ConstantInt::get(ResultType, 0));
3224 Value *ArgValue = EmitScalarExpr(Arg);
3225 if (ArgType->isObjCObjectPointerType()) {
3226 // Convert Objective-C objects to id because we cannot distinguish between
3227 // LLVM types for Obj-C classes as they are opaque.
3228 ArgType = CGM.getContext().getObjCIdType();
3229 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3231 Function *F =
3232 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3233 Value *Result = Builder.CreateCall(F, ArgValue);
3234 if (Result->getType() != ResultType)
3235 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3236 return RValue::get(Result);
3238 case Builtin::BI__builtin_dynamic_object_size:
3239 case Builtin::BI__builtin_object_size: {
3240 unsigned Type =
3241 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3242 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3244 // We pass this builtin onto the optimizer so that it can figure out the
3245 // object size in more complex cases.
3246 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3247 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3248 /*EmittedE=*/nullptr, IsDynamic));
3250 case Builtin::BI__builtin_prefetch: {
3251 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3252 // FIXME: Technically these constants should be of type 'int', yes?
3253 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3254 llvm::ConstantInt::get(Int32Ty, 0);
3255 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3256 llvm::ConstantInt::get(Int32Ty, 3);
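    // The last operand selects the data cache (1) rather than the instruction
    // cache (0).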
3257 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3258 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3259 Builder.CreateCall(F, {Address, RW, Locality, Data});
3260 return RValue::get(nullptr);
3262 case Builtin::BI__builtin_readcyclecounter: {
3263 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3264 return RValue::get(Builder.CreateCall(F));
3266 case Builtin::BI__builtin___clear_cache: {
3267 Value *Begin = EmitScalarExpr(E->getArg(0));
3268 Value *End = EmitScalarExpr(E->getArg(1));
3269 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3270 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3272 case Builtin::BI__builtin_trap:
3273 EmitTrapCall(Intrinsic::trap);
3274 return RValue::get(nullptr);
3275 case Builtin::BI__debugbreak:
3276 EmitTrapCall(Intrinsic::debugtrap);
3277 return RValue::get(nullptr);
3278 case Builtin::BI__builtin_unreachable: {
3279 EmitUnreachable(E->getExprLoc());
3281 // We do need to preserve an insertion point.
3282 EmitBlock(createBasicBlock("unreachable.cont"));
3284 return RValue::get(nullptr);
3287 case Builtin::BI__builtin_powi:
3288 case Builtin::BI__builtin_powif:
3289 case Builtin::BI__builtin_powil: {
3290 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3291 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3293 if (Builder.getIsFPConstrained()) {
3294 // FIXME: llvm.powi has 2 mangling types,
3295 // llvm.experimental.constrained.powi has one.
3296 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3297 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3298 Src0->getType());
3299 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3302 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3303 { Src0->getType(), Src1->getType() });
3304 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3306 case Builtin::BI__builtin_frexp:
3307 case Builtin::BI__builtin_frexpf:
3308 case Builtin::BI__builtin_frexpl:
3309 case Builtin::BI__builtin_frexpf128:
3310 case Builtin::BI__builtin_frexpf16:
3311 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3312 case Builtin::BI__builtin_isgreater:
3313 case Builtin::BI__builtin_isgreaterequal:
3314 case Builtin::BI__builtin_isless:
3315 case Builtin::BI__builtin_islessequal:
3316 case Builtin::BI__builtin_islessgreater:
3317 case Builtin::BI__builtin_isunordered: {
3318 // Ordered comparisons: we know the arguments to these are matching scalar
3319 // floating point values.
3320 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3321 Value *LHS = EmitScalarExpr(E->getArg(0));
3322 Value *RHS = EmitScalarExpr(E->getArg(1));
3324 switch (BuiltinID) {
3325 default: llvm_unreachable("Unknown ordered comparison");
3326 case Builtin::BI__builtin_isgreater:
3327 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3328 break;
3329 case Builtin::BI__builtin_isgreaterequal:
3330 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3331 break;
3332 case Builtin::BI__builtin_isless:
3333 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3334 break;
3335 case Builtin::BI__builtin_islessequal:
3336 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3337 break;
3338 case Builtin::BI__builtin_islessgreater:
3339 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3340 break;
3341 case Builtin::BI__builtin_isunordered:
3342 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3343 break;
3345 // ZExt bool to int type.
3346 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3349 case Builtin::BI__builtin_isnan: {
3350 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3351 Value *V = EmitScalarExpr(E->getArg(0));
3352 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3353 return RValue::get(Result);
3354 return RValue::get(
3355 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3356 ConvertType(E->getType())));
3359 case Builtin::BI__builtin_issignaling: {
3360 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3361 Value *V = EmitScalarExpr(E->getArg(0));
3362 return RValue::get(
3363 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3364 ConvertType(E->getType())));
3367 case Builtin::BI__builtin_isinf: {
3368 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3369 Value *V = EmitScalarExpr(E->getArg(0));
3370 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3371 return RValue::get(Result);
3372 return RValue::get(
3373 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3374 ConvertType(E->getType())));
3377 case Builtin::BIfinite:
3378 case Builtin::BI__finite:
3379 case Builtin::BIfinitef:
3380 case Builtin::BI__finitef:
3381 case Builtin::BIfinitel:
3382 case Builtin::BI__finitel:
3383 case Builtin::BI__builtin_isfinite: {
3384 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3385 Value *V = EmitScalarExpr(E->getArg(0));
3386 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3387 return RValue::get(Result);
3388 return RValue::get(
3389 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3390 ConvertType(E->getType())));
3393 case Builtin::BI__builtin_isnormal: {
3394 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3395 Value *V = EmitScalarExpr(E->getArg(0));
3396 return RValue::get(
3397 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3398 ConvertType(E->getType())));
3401 case Builtin::BI__builtin_issubnormal: {
3402 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3403 Value *V = EmitScalarExpr(E->getArg(0));
3404 return RValue::get(
3405 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3406 ConvertType(E->getType())));
3409 case Builtin::BI__builtin_iszero: {
3410 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3411 Value *V = EmitScalarExpr(E->getArg(0));
3412 return RValue::get(
3413 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3414 ConvertType(E->getType())));
3417 case Builtin::BI__builtin_isfpclass: {
3418 Expr::EvalResult Result;
3419 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3420 break;
3421 uint64_t Test = Result.Val.getInt().getLimitedValue();
3422 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3423 Value *V = EmitScalarExpr(E->getArg(0));
3424 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3425 ConvertType(E->getType())));
3428 case Builtin::BI__builtin_nondeterministic_value: {
3429 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3431 Value *Result = PoisonValue::get(Ty);
3432 Result = Builder.CreateFreeze(Result);
3434 return RValue::get(Result);
3437 case Builtin::BI__builtin_elementwise_abs: {
3438 Value *Result;
3439 QualType QT = E->getArg(0)->getType();
3441 if (auto *VecTy = QT->getAs<VectorType>())
3442 QT = VecTy->getElementType();
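    // Integer elements use llvm.abs with INT_MIN wrapping rather than being
    // poison; floating-point elements use llvm.fabs.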
3443 if (QT->isIntegerType())
3444 Result = Builder.CreateBinaryIntrinsic(
3445 llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
3446 Builder.getFalse(), nullptr, "elt.abs");
3447 else
3448 Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
3450 return RValue::get(Result);
3453 case Builtin::BI__builtin_elementwise_ceil:
3454 return RValue::get(
3455 emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
3456 case Builtin::BI__builtin_elementwise_exp:
3457 return RValue::get(
3458 emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
3459 case Builtin::BI__builtin_elementwise_exp2:
3460 return RValue::get(
3461 emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
3462 case Builtin::BI__builtin_elementwise_log:
3463 return RValue::get(
3464 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
3465 case Builtin::BI__builtin_elementwise_log2:
3466 return RValue::get(
3467 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
3468 case Builtin::BI__builtin_elementwise_log10:
3469 return RValue::get(
3470 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
3471 case Builtin::BI__builtin_elementwise_pow: {
3472 return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
3474 case Builtin::BI__builtin_elementwise_bitreverse:
3475 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::bitreverse,
3476 "elt.bitreverse"));
3477 case Builtin::BI__builtin_elementwise_cos:
3478 return RValue::get(
3479 emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
3480 case Builtin::BI__builtin_elementwise_floor:
3481 return RValue::get(
3482 emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
3483 case Builtin::BI__builtin_elementwise_roundeven:
3484 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
3485 "elt.roundeven"));
3486 case Builtin::BI__builtin_elementwise_round:
3487 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
3488 "elt.round"));
3489 case Builtin::BI__builtin_elementwise_rint:
3490 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
3491 "elt.rint"));
3492 case Builtin::BI__builtin_elementwise_nearbyint:
3493 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
3494 "elt.nearbyint"));
3495 case Builtin::BI__builtin_elementwise_sin:
3496 return RValue::get(
3497 emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));
3499 case Builtin::BI__builtin_elementwise_trunc:
3500 return RValue::get(
3501 emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
3502 case Builtin::BI__builtin_elementwise_canonicalize:
3503 return RValue::get(
3504 emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
3505 case Builtin::BI__builtin_elementwise_copysign:
3506 return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
3507 case Builtin::BI__builtin_elementwise_fma:
3508 return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
3509 case Builtin::BI__builtin_elementwise_add_sat:
3510 case Builtin::BI__builtin_elementwise_sub_sat: {
3511 Value *Op0 = EmitScalarExpr(E->getArg(0));
3512 Value *Op1 = EmitScalarExpr(E->getArg(1));
3513 Value *Result;
3514 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3515 QualType Ty = E->getArg(0)->getType();
3516 if (auto *VecTy = Ty->getAs<VectorType>())
3517 Ty = VecTy->getElementType();
3518 bool IsSigned = Ty->isSignedIntegerType();
3519 unsigned Opc;
3520 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
3521 Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
3522 else
3523 Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
3524 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
3525 return RValue::get(Result);
3528 case Builtin::BI__builtin_elementwise_max: {
3529 Value *Op0 = EmitScalarExpr(E->getArg(0));
3530 Value *Op1 = EmitScalarExpr(E->getArg(1));
3531 Value *Result;
3532 if (Op0->getType()->isIntOrIntVectorTy()) {
3533 QualType Ty = E->getArg(0)->getType();
3534 if (auto *VecTy = Ty->getAs<VectorType>())
3535 Ty = VecTy->getElementType();
3536 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3537 ? llvm::Intrinsic::smax
3538 : llvm::Intrinsic::umax,
3539 Op0, Op1, nullptr, "elt.max");
3540 } else
3541 Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
3542 return RValue::get(Result);
3544 case Builtin::BI__builtin_elementwise_min: {
3545 Value *Op0 = EmitScalarExpr(E->getArg(0));
3546 Value *Op1 = EmitScalarExpr(E->getArg(1));
3547 Value *Result;
3548 if (Op0->getType()->isIntOrIntVectorTy()) {
3549 QualType Ty = E->getArg(0)->getType();
3550 if (auto *VecTy = Ty->getAs<VectorType>())
3551 Ty = VecTy->getElementType();
3552 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3553 ? llvm::Intrinsic::smin
3554 : llvm::Intrinsic::umin,
3555 Op0, Op1, nullptr, "elt.min");
3556 } else
3557 Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
3558 return RValue::get(Result);
3561 case Builtin::BI__builtin_reduce_max: {
3562 auto GetIntrinsicID = [](QualType QT) {
3563 if (auto *VecTy = QT->getAs<VectorType>())
3564 QT = VecTy->getElementType();
3565 if (QT->isSignedIntegerType())
3566 return llvm::Intrinsic::vector_reduce_smax;
3567 if (QT->isUnsignedIntegerType())
3568 return llvm::Intrinsic::vector_reduce_umax;
3569 assert(QT->isFloatingType() && "must have a float here");
3570 return llvm::Intrinsic::vector_reduce_fmax;
3572 return RValue::get(emitUnaryBuiltin(
3573 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.max"));
3576 case Builtin::BI__builtin_reduce_min: {
3577 auto GetIntrinsicID = [](QualType QT) {
3578 if (auto *VecTy = QT->getAs<VectorType>())
3579 QT = VecTy->getElementType();
3580 if (QT->isSignedIntegerType())
3581 return llvm::Intrinsic::vector_reduce_smin;
3582 if (QT->isUnsignedIntegerType())
3583 return llvm::Intrinsic::vector_reduce_umin;
3584 assert(QT->isFloatingType() && "must have a float here");
3585 return llvm::Intrinsic::vector_reduce_fmin;
3588 return RValue::get(emitUnaryBuiltin(
3589 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
3592 case Builtin::BI__builtin_reduce_add:
3593 return RValue::get(emitUnaryBuiltin(
3594 *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
3595 case Builtin::BI__builtin_reduce_mul:
3596 return RValue::get(emitUnaryBuiltin(
3597 *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
3598 case Builtin::BI__builtin_reduce_xor:
3599 return RValue::get(emitUnaryBuiltin(
3600 *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
3601 case Builtin::BI__builtin_reduce_or:
3602 return RValue::get(emitUnaryBuiltin(
3603 *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
3604 case Builtin::BI__builtin_reduce_and:
3605 return RValue::get(emitUnaryBuiltin(
3606 *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
3608 case Builtin::BI__builtin_matrix_transpose: {
3609 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
3610 Value *MatValue = EmitScalarExpr(E->getArg(0));
3611 MatrixBuilder MB(Builder);
3612 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
3613 MatrixTy->getNumColumns());
3614 return RValue::get(Result);
3617 case Builtin::BI__builtin_matrix_column_major_load: {
3618 MatrixBuilder MB(Builder);
3619 // Emit everything that isn't dependent on the first parameter type
3620 Value *Stride = EmitScalarExpr(E->getArg(3));
3621 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3622 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
3623 assert(PtrTy && "arg0 must be of pointer type");
3624 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3626 Address Src = EmitPointerWithAlignment(E->getArg(0));
3627 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3628 E->getArg(0)->getExprLoc(), FD, 0);
3629 Value *Result = MB.CreateColumnMajorLoad(
3630 Src.getElementType(), Src.getPointer(),
3631 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
3632 ResultTy->getNumRows(), ResultTy->getNumColumns(),
3633 "matrix");
3634 return RValue::get(Result);
3637 case Builtin::BI__builtin_matrix_column_major_store: {
3638 MatrixBuilder MB(Builder);
3639 Value *Matrix = EmitScalarExpr(E->getArg(0));
3640 Address Dst = EmitPointerWithAlignment(E->getArg(1));
3641 Value *Stride = EmitScalarExpr(E->getArg(2));
3643 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3644 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
3645 assert(PtrTy && "arg1 must be of pointer type");
3646 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3648 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3649 E->getArg(1)->getExprLoc(), FD, 0);
3650 Value *Result = MB.CreateColumnMajorStore(
3651 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3652 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3653 return RValue::get(Result);
3656 case Builtin::BI__builtin_isinf_sign: {
3657 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3658 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3659 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3660 Value *Arg = EmitScalarExpr(E->getArg(0));
3661 Value *AbsArg = EmitFAbs(*this, Arg);
3662 Value *IsInf = Builder.CreateFCmpOEQ(
3663 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3664 Value *IsNeg = EmitSignBit(*this, Arg);
3666 llvm::Type *IntTy = ConvertType(E->getType());
3667 Value *Zero = Constant::getNullValue(IntTy);
3668 Value *One = ConstantInt::get(IntTy, 1);
3669 Value *NegativeOne = ConstantInt::get(IntTy, -1);
3670 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3671 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3672 return RValue::get(Result);
3675 case Builtin::BI__builtin_flt_rounds: {
3676 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
3678 llvm::Type *ResultType = ConvertType(E->getType());
3679 Value *Result = Builder.CreateCall(F);
3680 if (Result->getType() != ResultType)
3681 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3682 "cast");
3683 return RValue::get(Result);
3686 case Builtin::BI__builtin_set_flt_rounds: {
3687 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
3689 Value *V = EmitScalarExpr(E->getArg(0));
3690 Builder.CreateCall(F, V);
3691 return RValue::get(nullptr);
3694 case Builtin::BI__builtin_fpclassify: {
3695 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3696 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3697 Value *V = EmitScalarExpr(E->getArg(5));
3698 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
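    // Classify with a chain of tests (zero, NaN, infinity, then normal vs.
    // subnormal), merging the caller-supplied result values in a phi at the
    // end.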
3700 // Create Result
3701 BasicBlock *Begin = Builder.GetInsertBlock();
3702 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3703 Builder.SetInsertPoint(End);
3704 PHINode *Result =
3705 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3706 "fpclassify_result");
3708 // if (V==0) return FP_ZERO
3709 Builder.SetInsertPoint(Begin);
3710 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3711 "iszero");
3712 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3713 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3714 Builder.CreateCondBr(IsZero, End, NotZero);
3715 Result->addIncoming(ZeroLiteral, Begin);
3717 // if (V != V) return FP_NAN
3718 Builder.SetInsertPoint(NotZero);
3719 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3720 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3721 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3722 Builder.CreateCondBr(IsNan, End, NotNan);
3723 Result->addIncoming(NanLiteral, NotZero);
3725 // if (fabs(V) == infinity) return FP_INFINITY
3726 Builder.SetInsertPoint(NotNan);
3727 Value *VAbs = EmitFAbs(*this, V);
3728 Value *IsInf =
3729 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3730 "isinf");
3731 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3732 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3733 Builder.CreateCondBr(IsInf, End, NotInf);
3734 Result->addIncoming(InfLiteral, NotNan);
3736 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3737 Builder.SetInsertPoint(NotInf);
3738 APFloat Smallest = APFloat::getSmallestNormalized(
3739 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3740 Value *IsNormal =
3741 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3742 "isnormal");
3743 Value *NormalResult =
3744 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3745 EmitScalarExpr(E->getArg(3)));
3746 Builder.CreateBr(End);
3747 Result->addIncoming(NormalResult, NotInf);
3749 // return Result
3750 Builder.SetInsertPoint(End);
3751 return RValue::get(Result);
3754 // An alloca will always return a pointer to the alloca (stack) address
3755 // space. This address space need not be the same as the AST / Language
3756 // default (e.g. in C / C++ auto vars are in the generic address space). At
3757 // the AST level this is handled within CreateTempAlloca et al., but for the
3758 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
3759 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
3760 case Builtin::BIalloca:
3761 case Builtin::BI_alloca:
3762 case Builtin::BI__builtin_alloca_uninitialized:
3763 case Builtin::BI__builtin_alloca: {
3764 Value *Size = EmitScalarExpr(E->getArg(0));
3765 const TargetInfo &TI = getContext().getTargetInfo();
3766 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3767 const Align SuitableAlignmentInBytes =
3768 CGM.getContext()
3769 .toCharUnitsFromBits(TI.getSuitableAlign())
3770 .getAsAlign();
3771 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3772 AI->setAlignment(SuitableAlignmentInBytes);
3773 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
3774 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3775 LangAS AAS = getASTAllocaAddressSpace();
3776 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
3777 if (AAS != EAS) {
3778 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
3779 return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
3780 EAS, Ty));
3782 return RValue::get(AI);
3785 case Builtin::BI__builtin_alloca_with_align_uninitialized:
3786 case Builtin::BI__builtin_alloca_with_align: {
3787 Value *Size = EmitScalarExpr(E->getArg(0));
3788 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3789 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3790 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3791 const Align AlignmentInBytes =
3792 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3793 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3794 AI->setAlignment(AlignmentInBytes);
3795 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
3796 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3797 LangAS AAS = getASTAllocaAddressSpace();
3798 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
3799 if (AAS != EAS) {
3800 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
3801 return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
3802 EAS, Ty));
3804 return RValue::get(AI);
3807 case Builtin::BIbzero:
3808 case Builtin::BI__builtin_bzero: {
3809 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3810 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3811 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3812 E->getArg(0)->getExprLoc(), FD, 0);
3813 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3814 return RValue::get(nullptr);
3817 case Builtin::BIbcopy:
3818 case Builtin::BI__builtin_bcopy: {
3819 Address Src = EmitPointerWithAlignment(E->getArg(0));
3820 Address Dest = EmitPointerWithAlignment(E->getArg(1));
3821 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3822 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3823 E->getArg(0)->getExprLoc(), FD, 0);
3824 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(),
3825 E->getArg(1)->getExprLoc(), FD, 0);
3826 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3827 return RValue::get(Dest.getPointer());
3830 case Builtin::BImemcpy:
3831 case Builtin::BI__builtin_memcpy:
3832 case Builtin::BImempcpy:
3833 case Builtin::BI__builtin_mempcpy: {
3834 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3835 Address Src = EmitPointerWithAlignment(E->getArg(1));
3836 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3837 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
3838 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
3839 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3840 if (BuiltinID == Builtin::BImempcpy ||
3841 BuiltinID == Builtin::BI__builtin_mempcpy)
3842 return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
3843 Dest.getPointer(), SizeVal));
3844 else
3845 return RValue::get(Dest.getPointer());
3848 case Builtin::BI__builtin_memcpy_inline: {
3849 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3850 Address Src = EmitPointerWithAlignment(E->getArg(1));
3851 uint64_t Size =
3852 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3853 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
3854 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
3855 Builder.CreateMemCpyInline(Dest, Src, Size);
3856 return RValue::get(nullptr);
3859 case Builtin::BI__builtin_char_memchr:
3860 BuiltinID = Builtin::BI__builtin_memchr;
3861 break;
3863 case Builtin::BI__builtin___memcpy_chk: {
3864 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3865 Expr::EvalResult SizeResult, DstSizeResult;
3866 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3867 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3868 break;
3869 llvm::APSInt Size = SizeResult.Val.getInt();
3870 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3871 if (Size.ugt(DstSize))
3872 break;
3873 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3874 Address Src = EmitPointerWithAlignment(E->getArg(1));
3875 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3876 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3877 return RValue::get(Dest.getPointer());
3880 case Builtin::BI__builtin_objc_memmove_collectable: {
3881 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3882 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3883 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3884 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3885 DestAddr, SrcAddr, SizeVal);
3886 return RValue::get(DestAddr.getPointer());
3889 case Builtin::BI__builtin___memmove_chk: {
3890 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3891 Expr::EvalResult SizeResult, DstSizeResult;
3892 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3893 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3894 break;
3895 llvm::APSInt Size = SizeResult.Val.getInt();
3896 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3897 if (Size.ugt(DstSize))
3898 break;
3899 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3900 Address Src = EmitPointerWithAlignment(E->getArg(1));
3901 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3902 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3903 return RValue::get(Dest.getPointer());
3906 case Builtin::BImemmove:
3907 case Builtin::BI__builtin_memmove: {
3908 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3909 Address Src = EmitPointerWithAlignment(E->getArg(1));
3910 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3911 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
3912 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
3913 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3914 return RValue::get(Dest.getPointer());
3916 case Builtin::BImemset:
3917 case Builtin::BI__builtin_memset: {
3918 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3919 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3920 Builder.getInt8Ty());
3921 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3922 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3923 E->getArg(0)->getExprLoc(), FD, 0);
3924 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3925 return RValue::get(Dest.getPointer());
3927 case Builtin::BI__builtin_memset_inline: {
3928 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3929 Value *ByteVal =
3930 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
3931 uint64_t Size =
3932 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3933 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3934 E->getArg(0)->getExprLoc(), FD, 0);
3935 Builder.CreateMemSetInline(Dest, ByteVal, Size);
3936 return RValue::get(nullptr);
3938 case Builtin::BI__builtin___memset_chk: {
3939 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3940 Expr::EvalResult SizeResult, DstSizeResult;
3941 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3942 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3943 break;
3944 llvm::APSInt Size = SizeResult.Val.getInt();
3945 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3946 if (Size.ugt(DstSize))
3947 break;
3948 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3949 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3950 Builder.getInt8Ty());
3951 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3952 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3953 return RValue::get(Dest.getPointer());
3955 case Builtin::BI__builtin_wmemchr: {
3956 // The MSVC runtime library does not provide a definition of wmemchr, so we
3957 // need an inline implementation.
3958 if (!getTarget().getTriple().isOSMSVCRT())
3959 break;
3961 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3962 Value *Str = EmitScalarExpr(E->getArg(0));
3963 Value *Chr = EmitScalarExpr(E->getArg(1));
3964 Value *Size = EmitScalarExpr(E->getArg(2));
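    // Expand to a loop that compares one wide character per iteration,
    // yielding a pointer to the first match or null once Size characters have
    // been scanned.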
3966 BasicBlock *Entry = Builder.GetInsertBlock();
3967 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
3968 BasicBlock *Next = createBasicBlock("wmemchr.next");
3969 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
3970 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3971 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
3973 EmitBlock(CmpEq);
3974 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
3975 StrPhi->addIncoming(Str, Entry);
3976 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3977 SizePhi->addIncoming(Size, Entry);
3978 CharUnits WCharAlign =
3979 getContext().getTypeAlignInChars(getContext().WCharTy);
3980 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
3981 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
3982 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
3983 Builder.CreateCondBr(StrEqChr, Exit, Next);
3985 EmitBlock(Next);
3986 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
3987 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3988 Value *NextSizeEq0 =
3989 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3990 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
3991 StrPhi->addIncoming(NextStr, Next);
3992 SizePhi->addIncoming(NextSize, Next);
3994 EmitBlock(Exit);
3995 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
3996 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
3997 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
3998 Ret->addIncoming(FoundChr, CmpEq);
3999 return RValue::get(Ret);
4001 case Builtin::BI__builtin_wmemcmp: {
4002 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4003 // need an inline implementation.
4004 if (!getTarget().getTriple().isOSMSVCRT())
4005 break;
4007 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4009 Value *Dst = EmitScalarExpr(E->getArg(0));
4010 Value *Src = EmitScalarExpr(E->getArg(1));
4011 Value *Size = EmitScalarExpr(E->getArg(2));
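    // Expand to a loop: return 1 or -1 as soon as corresponding characters
    // differ (compared as unsigned values), and 0 once Size characters match.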
4013 BasicBlock *Entry = Builder.GetInsertBlock();
4014 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4015 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4016 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4017 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4018 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4019 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4021 EmitBlock(CmpGT);
4022 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4023 DstPhi->addIncoming(Dst, Entry);
4024 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4025 SrcPhi->addIncoming(Src, Entry);
4026 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4027 SizePhi->addIncoming(Size, Entry);
4028 CharUnits WCharAlign =
4029 getContext().getTypeAlignInChars(getContext().WCharTy);
4030 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4031 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4032 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4033 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4035 EmitBlock(CmpLT);
4036 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4037 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4039 EmitBlock(Next);
4040 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4041 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4042 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4043 Value *NextSizeEq0 =
4044 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4045 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4046 DstPhi->addIncoming(NextDst, Next);
4047 SrcPhi->addIncoming(NextSrc, Next);
4048 SizePhi->addIncoming(NextSize, Next);
4050 EmitBlock(Exit);
4051 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4052 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4053 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4054 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
4055 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4056 return RValue::get(Ret);
4058 case Builtin::BI__builtin_dwarf_cfa: {
4059 // The offset in bytes from the first argument to the CFA.
4061 // Why on earth is this in the frontend? Is there any reason at
4062 // all that the backend can't reasonably determine this while
4063 // lowering llvm.eh.dwarf.cfa()?
4065 // TODO: If there's a satisfactory reason, add a target hook for
4066 // this instead of hard-coding 0, which is correct for most targets.
4067 int32_t Offset = 0;
4069 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4070 return RValue::get(Builder.CreateCall(F,
4071 llvm::ConstantInt::get(Int32Ty, Offset)));
4073 case Builtin::BI__builtin_return_address: {
4074 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4075 getContext().UnsignedIntTy);
4076 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4077 return RValue::get(Builder.CreateCall(F, Depth));
4079 case Builtin::BI_ReturnAddress: {
4080 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4081 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4083 case Builtin::BI__builtin_frame_address: {
4084 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4085 getContext().UnsignedIntTy);
4086 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4087 return RValue::get(Builder.CreateCall(F, Depth));
4089 case Builtin::BI__builtin_extract_return_addr: {
4090 Value *Address = EmitScalarExpr(E->getArg(0));
4091 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4092 return RValue::get(Result);
4094 case Builtin::BI__builtin_frob_return_addr: {
4095 Value *Address = EmitScalarExpr(E->getArg(0));
4096 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4097 return RValue::get(Result);
4099 case Builtin::BI__builtin_dwarf_sp_column: {
4100 llvm::IntegerType *Ty
4101 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4102 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4103 if (Column == -1) {
4104 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4105 return RValue::get(llvm::UndefValue::get(Ty));
4107 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4109 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4110 Value *Address = EmitScalarExpr(E->getArg(0));
4111 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4112 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4113 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4115 case Builtin::BI__builtin_eh_return: {
4116 Value *Int = EmitScalarExpr(E->getArg(0));
4117 Value *Ptr = EmitScalarExpr(E->getArg(1));
4119 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4120 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4121 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4122 Function *F =
4123 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4124 : Intrinsic::eh_return_i64);
4125 Builder.CreateCall(F, {Int, Ptr});
4126 Builder.CreateUnreachable();
4128 // We do need to preserve an insertion point.
4129 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4131 return RValue::get(nullptr);
4133 case Builtin::BI__builtin_unwind_init: {
4134 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4135 Builder.CreateCall(F);
4136 return RValue::get(nullptr);
4138 case Builtin::BI__builtin_extend_pointer: {
4139 // Extends a pointer to the size of an _Unwind_Word, which is
4140 // uint64_t on all platforms. Generally this gets poked into a
4141 // register and eventually used as an address, so if the
4142 // addressing registers are wider than pointers and the platform
4143 // doesn't implicitly ignore high-order bits when doing
4144 // addressing, we need to make sure we zext / sext based on
4145 // the platform's expectations.
4147 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4149 // Cast the pointer to intptr_t.
4150 Value *Ptr = EmitScalarExpr(E->getArg(0));
4151 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4153 // If that's 64 bits, we're done.
4154 if (IntPtrTy->getBitWidth() == 64)
4155 return RValue::get(Result);
4157 // Otherwise, ask the codegen data what to do.
4158 if (getTargetHooks().extendPointerWithSExt())
4159 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4160 else
4161 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4163 case Builtin::BI__builtin_setjmp: {
4164 // Buffer is a void**.
4165 Address Buf = EmitPointerWithAlignment(E->getArg(0));
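    // Fill in the parts of the jump buffer the intrinsic expects from us:
    // slot 0 gets the frame pointer and slot 2 the stack pointer; the
    // remaining slots are managed by the eh.sjlj.setjmp lowering.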
4167 // Store the frame pointer to the setjmp buffer.
4168 Value *FrameAddr = Builder.CreateCall(
4169 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4170 ConstantInt::get(Int32Ty, 0));
4171 Builder.CreateStore(FrameAddr, Buf);
4173 // Store the stack pointer to the setjmp buffer.
4174 Value *StackAddr = Builder.CreateStackSave();
4175 assert(Buf.getPointer()->getType() == StackAddr->getType());
4177 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4178 Builder.CreateStore(StackAddr, StackSaveSlot);
4180 // Call LLVM's EH setjmp, which is lightweight.
4181 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4182 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
4184 case Builtin::BI__builtin_longjmp: {
4185 Value *Buf = EmitScalarExpr(E->getArg(0));
4187 // Call LLVM's EH longjmp, which is lightweight.
4188 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4190 // longjmp doesn't return; mark this as unreachable.
4191 Builder.CreateUnreachable();
4193 // We do need to preserve an insertion point.
4194 EmitBlock(createBasicBlock("longjmp.cont"));
4196 return RValue::get(nullptr);
4198 case Builtin::BI__builtin_launder: {
4199 const Expr *Arg = E->getArg(0);
4200 QualType ArgTy = Arg->getType()->getPointeeType();
4201 Value *Ptr = EmitScalarExpr(Arg);
4202 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4203 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4205 return RValue::get(Ptr);
4207 case Builtin::BI__sync_fetch_and_add:
4208 case Builtin::BI__sync_fetch_and_sub:
4209 case Builtin::BI__sync_fetch_and_or:
4210 case Builtin::BI__sync_fetch_and_and:
4211 case Builtin::BI__sync_fetch_and_xor:
4212 case Builtin::BI__sync_fetch_and_nand:
4213 case Builtin::BI__sync_add_and_fetch:
4214 case Builtin::BI__sync_sub_and_fetch:
4215 case Builtin::BI__sync_and_and_fetch:
4216 case Builtin::BI__sync_or_and_fetch:
4217 case Builtin::BI__sync_xor_and_fetch:
4218 case Builtin::BI__sync_nand_and_fetch:
4219 case Builtin::BI__sync_val_compare_and_swap:
4220 case Builtin::BI__sync_bool_compare_and_swap:
4221 case Builtin::BI__sync_lock_test_and_set:
4222 case Builtin::BI__sync_lock_release:
4223 case Builtin::BI__sync_swap:
4224 llvm_unreachable("Shouldn't make it through sema");
4225 case Builtin::BI__sync_fetch_and_add_1:
4226 case Builtin::BI__sync_fetch_and_add_2:
4227 case Builtin::BI__sync_fetch_and_add_4:
4228 case Builtin::BI__sync_fetch_and_add_8:
4229 case Builtin::BI__sync_fetch_and_add_16:
4230 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4231 case Builtin::BI__sync_fetch_and_sub_1:
4232 case Builtin::BI__sync_fetch_and_sub_2:
4233 case Builtin::BI__sync_fetch_and_sub_4:
4234 case Builtin::BI__sync_fetch_and_sub_8:
4235 case Builtin::BI__sync_fetch_and_sub_16:
4236 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4237 case Builtin::BI__sync_fetch_and_or_1:
4238 case Builtin::BI__sync_fetch_and_or_2:
4239 case Builtin::BI__sync_fetch_and_or_4:
4240 case Builtin::BI__sync_fetch_and_or_8:
4241 case Builtin::BI__sync_fetch_and_or_16:
4242 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4243 case Builtin::BI__sync_fetch_and_and_1:
4244 case Builtin::BI__sync_fetch_and_and_2:
4245 case Builtin::BI__sync_fetch_and_and_4:
4246 case Builtin::BI__sync_fetch_and_and_8:
4247 case Builtin::BI__sync_fetch_and_and_16:
4248 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4249 case Builtin::BI__sync_fetch_and_xor_1:
4250 case Builtin::BI__sync_fetch_and_xor_2:
4251 case Builtin::BI__sync_fetch_and_xor_4:
4252 case Builtin::BI__sync_fetch_and_xor_8:
4253 case Builtin::BI__sync_fetch_and_xor_16:
4254 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4255 case Builtin::BI__sync_fetch_and_nand_1:
4256 case Builtin::BI__sync_fetch_and_nand_2:
4257 case Builtin::BI__sync_fetch_and_nand_4:
4258 case Builtin::BI__sync_fetch_and_nand_8:
4259 case Builtin::BI__sync_fetch_and_nand_16:
4260 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4262 // Clang extensions: not overloaded yet.
4263 case Builtin::BI__sync_fetch_and_min:
4264 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4265 case Builtin::BI__sync_fetch_and_max:
4266 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4267 case Builtin::BI__sync_fetch_and_umin:
4268 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4269 case Builtin::BI__sync_fetch_and_umax:
4270 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4272 case Builtin::BI__sync_add_and_fetch_1:
4273 case Builtin::BI__sync_add_and_fetch_2:
4274 case Builtin::BI__sync_add_and_fetch_4:
4275 case Builtin::BI__sync_add_and_fetch_8:
4276 case Builtin::BI__sync_add_and_fetch_16:
4277 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4278 llvm::Instruction::Add);
4279 case Builtin::BI__sync_sub_and_fetch_1:
4280 case Builtin::BI__sync_sub_and_fetch_2:
4281 case Builtin::BI__sync_sub_and_fetch_4:
4282 case Builtin::BI__sync_sub_and_fetch_8:
4283 case Builtin::BI__sync_sub_and_fetch_16:
4284 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4285 llvm::Instruction::Sub);
4286 case Builtin::BI__sync_and_and_fetch_1:
4287 case Builtin::BI__sync_and_and_fetch_2:
4288 case Builtin::BI__sync_and_and_fetch_4:
4289 case Builtin::BI__sync_and_and_fetch_8:
4290 case Builtin::BI__sync_and_and_fetch_16:
4291 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4292 llvm::Instruction::And);
4293 case Builtin::BI__sync_or_and_fetch_1:
4294 case Builtin::BI__sync_or_and_fetch_2:
4295 case Builtin::BI__sync_or_and_fetch_4:
4296 case Builtin::BI__sync_or_and_fetch_8:
4297 case Builtin::BI__sync_or_and_fetch_16:
4298 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4299 llvm::Instruction::Or);
4300 case Builtin::BI__sync_xor_and_fetch_1:
4301 case Builtin::BI__sync_xor_and_fetch_2:
4302 case Builtin::BI__sync_xor_and_fetch_4:
4303 case Builtin::BI__sync_xor_and_fetch_8:
4304 case Builtin::BI__sync_xor_and_fetch_16:
4305 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4306 llvm::Instruction::Xor);
4307 case Builtin::BI__sync_nand_and_fetch_1:
4308 case Builtin::BI__sync_nand_and_fetch_2:
4309 case Builtin::BI__sync_nand_and_fetch_4:
4310 case Builtin::BI__sync_nand_and_fetch_8:
4311 case Builtin::BI__sync_nand_and_fetch_16:
4312 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4313 llvm::Instruction::And, true);
4315 case Builtin::BI__sync_val_compare_and_swap_1:
4316 case Builtin::BI__sync_val_compare_and_swap_2:
4317 case Builtin::BI__sync_val_compare_and_swap_4:
4318 case Builtin::BI__sync_val_compare_and_swap_8:
4319 case Builtin::BI__sync_val_compare_and_swap_16:
4320 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4322 case Builtin::BI__sync_bool_compare_and_swap_1:
4323 case Builtin::BI__sync_bool_compare_and_swap_2:
4324 case Builtin::BI__sync_bool_compare_and_swap_4:
4325 case Builtin::BI__sync_bool_compare_and_swap_8:
4326 case Builtin::BI__sync_bool_compare_and_swap_16:
4327 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
4329 case Builtin::BI__sync_swap_1:
4330 case Builtin::BI__sync_swap_2:
4331 case Builtin::BI__sync_swap_4:
4332 case Builtin::BI__sync_swap_8:
4333 case Builtin::BI__sync_swap_16:
4334 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4336 case Builtin::BI__sync_lock_test_and_set_1:
4337 case Builtin::BI__sync_lock_test_and_set_2:
4338 case Builtin::BI__sync_lock_test_and_set_4:
4339 case Builtin::BI__sync_lock_test_and_set_8:
4340 case Builtin::BI__sync_lock_test_and_set_16:
4341 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4343 case Builtin::BI__sync_lock_release_1:
4344 case Builtin::BI__sync_lock_release_2:
4345 case Builtin::BI__sync_lock_release_4:
4346 case Builtin::BI__sync_lock_release_8:
4347 case Builtin::BI__sync_lock_release_16: {
4348 Value *Ptr = CheckAtomicAlignment(*this, E);
4349 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
4350 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
4351 llvm::Type *ITy =
4352 llvm::IntegerType::get(getLLVMContext(), StoreSize.getQuantity() * 8);
4353 llvm::StoreInst *Store =
4354 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
4355 StoreSize);
4356 Store->setAtomic(llvm::AtomicOrdering::Release);
4357 return RValue::get(nullptr);
4358 }
4360 case Builtin::BI__sync_synchronize: {
4361 // We assume this is supposed to correspond to a C++0x-style
4362 // sequentially-consistent fence (i.e. this is only usable for
4363 // synchronization, not device I/O or anything like that). This intrinsic
4364 // is really badly designed in the sense that in theory, there isn't
4365 // any way to safely use it... but in practice, it mostly works
4366 // to use it with non-atomic loads and stores to get acquire/release
4367 // semantics.
4368 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
4369 return RValue::get(nullptr);
4370 }
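// For example (illustrative), a call such as
//   __sync_synchronize();
// becomes a single sequentially-consistent fence in IR:
//   fence seq_cst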
4372 case Builtin::BI__builtin_nontemporal_load:
4373 return RValue::get(EmitNontemporalLoad(*this, E));
4374 case Builtin::BI__builtin_nontemporal_store:
4375 return RValue::get(EmitNontemporalStore(*this, E));
4376 case Builtin::BI__c11_atomic_is_lock_free:
4377 case Builtin::BI__atomic_is_lock_free: {
4378 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
4379 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
4380 // _Atomic(T) is always properly-aligned.
4381 const char *LibCallName = "__atomic_is_lock_free";
4382 CallArgList Args;
4383 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
4384 getContext().getSizeType());
4385 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
4386 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
4387 getContext().VoidPtrTy);
4388 else
4389 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
4390 getContext().VoidPtrTy);
4391 const CGFunctionInfo &FuncInfo =
4392 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
4393 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
4394 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
4395 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
4396 ReturnValueSlot(), Args);
4397 }
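// Illustrative example (assuming a 64-bit size_t):
//   _Bool ok = __atomic_is_lock_free(sizeof(long), p);
// emits approximately
//   call i1 @__atomic_is_lock_free(i64 8, ptr %p)
// while the __c11 form passes a null pointer as the second argument.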
4399 case Builtin::BI__atomic_test_and_set: {
4400 // Look at the argument type to determine whether this is a volatile
4401 // operation. The parameter type is always volatile.
4402 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
4403 bool Volatile =
4404 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
4406 Value *Ptr = EmitScalarExpr(E->getArg(0));
4407 Value *NewVal = Builder.getInt8(1);
4408 Value *Order = EmitScalarExpr(E->getArg(1));
4409 if (isa<llvm::ConstantInt>(Order)) {
4410 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4411 AtomicRMWInst *Result = nullptr;
4412 switch (ord) {
4413 case 0: // memory_order_relaxed
4414 default: // invalid order
4415 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4416 llvm::AtomicOrdering::Monotonic);
4417 break;
4418 case 1: // memory_order_consume
4419 case 2: // memory_order_acquire
4420 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4421 llvm::AtomicOrdering::Acquire);
4422 break;
4423 case 3: // memory_order_release
4424 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4425 llvm::AtomicOrdering::Release);
4426 break;
4427 case 4: // memory_order_acq_rel
4429 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4430 llvm::AtomicOrdering::AcquireRelease);
4431 break;
4432 case 5: // memory_order_seq_cst
4433 Result = Builder.CreateAtomicRMW(
4434 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4435 llvm::AtomicOrdering::SequentiallyConsistent);
4436 break;
4437 }
4438 Result->setVolatile(Volatile);
4439 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4440 }
4442 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4444 llvm::BasicBlock *BBs[5] = {
4445 createBasicBlock("monotonic", CurFn),
4446 createBasicBlock("acquire", CurFn),
4447 createBasicBlock("release", CurFn),
4448 createBasicBlock("acqrel", CurFn),
4449 createBasicBlock("seqcst", CurFn)
4450 };
4451 llvm::AtomicOrdering Orders[5] = {
4452 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
4453 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
4454 llvm::AtomicOrdering::SequentiallyConsistent};
4456 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4457 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4459 Builder.SetInsertPoint(ContBB);
4460 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
4462 for (unsigned i = 0; i < 5; ++i) {
4463 Builder.SetInsertPoint(BBs[i]);
4464 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
4465 Ptr, NewVal, Orders[i]);
4466 RMW->setVolatile(Volatile);
4467 Result->addIncoming(RMW, BBs[i]);
4468 Builder.CreateBr(ContBB);
4469 }
4471 SI->addCase(Builder.getInt32(0), BBs[0]);
4472 SI->addCase(Builder.getInt32(1), BBs[1]);
4473 SI->addCase(Builder.getInt32(2), BBs[1]);
4474 SI->addCase(Builder.getInt32(3), BBs[2]);
4475 SI->addCase(Builder.getInt32(4), BBs[3]);
4476 SI->addCase(Builder.getInt32(5), BBs[4]);
4478 Builder.SetInsertPoint(ContBB);
4479 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4480 }
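// Note: when the ordering is not a compile-time constant, e.g.
//   _Bool b = __atomic_test_and_set(p, order);   // 'order' known only at run time
// the code above switches on the runtime ordering value, emits one xchg per
// supported ordering, and merges the results through the "was_set" PHI.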
4482 case Builtin::BI__atomic_clear: {
4483 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
4484 bool Volatile =
4485 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
4487 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
4488 Ptr = Ptr.withElementType(Int8Ty);
4489 Value *NewVal = Builder.getInt8(0);
4490 Value *Order = EmitScalarExpr(E->getArg(1));
4491 if (isa<llvm::ConstantInt>(Order)) {
4492 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4493 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4494 switch (ord) {
4495 case 0: // memory_order_relaxed
4496 default: // invalid order
4497 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
4498 break;
4499 case 3: // memory_order_release
4500 Store->setOrdering(llvm::AtomicOrdering::Release);
4501 break;
4502 case 5: // memory_order_seq_cst
4503 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
4504 break;
4505 }
4506 return RValue::get(nullptr);
4507 }
4509 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4511 llvm::BasicBlock *BBs[3] = {
4512 createBasicBlock("monotonic", CurFn),
4513 createBasicBlock("release", CurFn),
4514 createBasicBlock("seqcst", CurFn)
4515 };
4516 llvm::AtomicOrdering Orders[3] = {
4517 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
4518 llvm::AtomicOrdering::SequentiallyConsistent};
4520 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4521 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4523 for (unsigned i = 0; i < 3; ++i) {
4524 Builder.SetInsertPoint(BBs[i]);
4525 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4526 Store->setOrdering(Orders[i]);
4527 Builder.CreateBr(ContBB);
4528 }
4530 SI->addCase(Builder.getInt32(0), BBs[0]);
4531 SI->addCase(Builder.getInt32(3), BBs[1]);
4532 SI->addCase(Builder.getInt32(5), BBs[2]);
4534 Builder.SetInsertPoint(ContBB);
4535 return RValue::get(nullptr);
4536 }
4538 case Builtin::BI__atomic_thread_fence:
4539 case Builtin::BI__atomic_signal_fence:
4540 case Builtin::BI__c11_atomic_thread_fence:
4541 case Builtin::BI__c11_atomic_signal_fence: {
4542 llvm::SyncScope::ID SSID;
4543 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
4544 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
4545 SSID = llvm::SyncScope::SingleThread;
4546 else
4547 SSID = llvm::SyncScope::System;
4548 Value *Order = EmitScalarExpr(E->getArg(0));
4549 if (isa<llvm::ConstantInt>(Order)) {
4550 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4551 switch (ord) {
4552 case 0: // memory_order_relaxed
4553 default: // invalid order
4554 break;
4555 case 1: // memory_order_consume
4556 case 2: // memory_order_acquire
4557 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4558 break;
4559 case 3: // memory_order_release
4560 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4561 break;
4562 case 4: // memory_order_acq_rel
4563 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4564 break;
4565 case 5: // memory_order_seq_cst
4566 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4567 break;
4568 }
4569 return RValue::get(nullptr);
4570 }
4572 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
4573 AcquireBB = createBasicBlock("acquire", CurFn);
4574 ReleaseBB = createBasicBlock("release", CurFn);
4575 AcqRelBB = createBasicBlock("acqrel", CurFn);
4576 SeqCstBB = createBasicBlock("seqcst", CurFn);
4577 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4579 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4580 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
4582 Builder.SetInsertPoint(AcquireBB);
4583 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4584 Builder.CreateBr(ContBB);
4585 SI->addCase(Builder.getInt32(1), AcquireBB);
4586 SI->addCase(Builder.getInt32(2), AcquireBB);
4588 Builder.SetInsertPoint(ReleaseBB);
4589 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4590 Builder.CreateBr(ContBB);
4591 SI->addCase(Builder.getInt32(3), ReleaseBB);
4593 Builder.SetInsertPoint(AcqRelBB);
4594 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4595 Builder.CreateBr(ContBB);
4596 SI->addCase(Builder.getInt32(4), AcqRelBB);
4598 Builder.SetInsertPoint(SeqCstBB);
4599 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4600 Builder.CreateBr(ContBB);
4601 SI->addCase(Builder.getInt32(5), SeqCstBB);
4603 Builder.SetInsertPoint(ContBB);
4604 return RValue::get(nullptr);
4605 }
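// For example (illustrative), with a constant ordering
//   __atomic_thread_fence(__ATOMIC_ACQUIRE);
// lowers to
//   fence acquire
// and the signal-fence variants use the single-thread scope, roughly
//   fence syncscope("singlethread") acquire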
4607 case Builtin::BI__builtin_signbit:
4608 case Builtin::BI__builtin_signbitf:
4609 case Builtin::BI__builtin_signbitl: {
4610 return RValue::get(
4611 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
4612 ConvertType(E->getType())));
4613 }
4614 case Builtin::BI__warn_memset_zero_len:
4615 return RValue::getIgnored();
4616 case Builtin::BI__annotation: {
4617 // Re-encode each wide string to UTF8 and make an MDString.
4618 SmallVector<Metadata *, 1> Strings;
4619 for (const Expr *Arg : E->arguments()) {
4620 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
4621 assert(Str->getCharByteWidth() == 2);
4622 StringRef WideBytes = Str->getBytes();
4623 std::string StrUtf8;
4624 if (!convertUTF16ToUTF8String(
4625 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
4626 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
4627 continue;
4628 }
4629 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
4630 }
4632 // Build an MDTuple of MDStrings and emit the intrinsic call.
4633 llvm::Function *F =
4634 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
4635 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
4636 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
4637 return RValue::getIgnored();
4638 }
4639 case Builtin::BI__builtin_annotation: {
4640 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
4641 llvm::Function *F =
4642 CGM.getIntrinsic(llvm::Intrinsic::annotation,
4643 {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
4645 // Get the annotation string, go through casts. Sema requires this to be a
4646 // non-wide string literal, potentially cast, so the cast<> is safe.
4647 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
4648 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
4649 return RValue::get(
4650 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
4651 }
4652 case Builtin::BI__builtin_addcb:
4653 case Builtin::BI__builtin_addcs:
4654 case Builtin::BI__builtin_addc:
4655 case Builtin::BI__builtin_addcl:
4656 case Builtin::BI__builtin_addcll:
4657 case Builtin::BI__builtin_subcb:
4658 case Builtin::BI__builtin_subcs:
4659 case Builtin::BI__builtin_subc:
4660 case Builtin::BI__builtin_subcl:
4661 case Builtin::BI__builtin_subcll: {
4663 // We translate all of these builtins from expressions of the form:
4664 // int x = ..., y = ..., carryin = ..., carryout, result;
4665 // result = __builtin_addc(x, y, carryin, &carryout);
4667 // to LLVM IR of the form:
4669 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
4670 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
4671 // %carry1 = extractvalue {i32, i1} %tmp1, 1
4672 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
4673 // i32 %carryin)
4674 // %result = extractvalue {i32, i1} %tmp2, 0
4675 // %carry2 = extractvalue {i32, i1} %tmp2, 1
4676 // %tmp3 = or i1 %carry1, %carry2
4677 // %tmp4 = zext i1 %tmp3 to i32
4678 // store i32 %tmp4, i32* %carryout
4680 // Scalarize our inputs.
4681 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4682 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4683 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
4684 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
4686 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
4687 llvm::Intrinsic::ID IntrinsicId;
4688 switch (BuiltinID) {
4689 default: llvm_unreachable("Unknown multiprecision builtin id.");
4690 case Builtin::BI__builtin_addcb:
4691 case Builtin::BI__builtin_addcs:
4692 case Builtin::BI__builtin_addc:
4693 case Builtin::BI__builtin_addcl:
4694 case Builtin::BI__builtin_addcll:
4695 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4696 break;
4697 case Builtin::BI__builtin_subcb:
4698 case Builtin::BI__builtin_subcs:
4699 case Builtin::BI__builtin_subc:
4700 case Builtin::BI__builtin_subcl:
4701 case Builtin::BI__builtin_subcll:
4702 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4703 break;
4704 }
4706 // Construct our resulting LLVM IR expression.
4707 llvm::Value *Carry1;
4708 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
4709 X, Y, Carry1);
4710 llvm::Value *Carry2;
4711 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
4712 Sum1, Carryin, Carry2);
4713 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
4714 X->getType());
4715 Builder.CreateStore(CarryOut, CarryOutPtr);
4716 return RValue::get(Sum2);
4717 }
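// Usage sketch: these builtins compose into multi-word arithmetic by feeding
// each carry-out into the next carry-in, e.g.
//   unsigned c;
//   unsigned lo = __builtin_addc(a0, b0, 0, &c);
//   unsigned hi = __builtin_addc(a1, b1, c, &c);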
4719 case Builtin::BI__builtin_add_overflow:
4720 case Builtin::BI__builtin_sub_overflow:
4721 case Builtin::BI__builtin_mul_overflow: {
4722 const clang::Expr *LeftArg = E->getArg(0);
4723 const clang::Expr *RightArg = E->getArg(1);
4724 const clang::Expr *ResultArg = E->getArg(2);
4726 clang::QualType ResultQTy =
4727 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
4729 WidthAndSignedness LeftInfo =
4730 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
4731 WidthAndSignedness RightInfo =
4732 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
4733 WidthAndSignedness ResultInfo =
4734 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
4736 // Handle mixed-sign multiplication as a special case, because adding
4737 // runtime or backend support for our generic irgen would be too expensive.
4738 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
4739 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
4740 RightInfo, ResultArg, ResultQTy,
4741 ResultInfo);
4743 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
4744 ResultInfo))
4745 return EmitCheckedUnsignedMultiplySignedResult(
4746 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
4747 ResultInfo);
4749 WidthAndSignedness EncompassingInfo =
4750 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
4752 llvm::Type *EncompassingLLVMTy =
4753 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
4755 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
4757 llvm::Intrinsic::ID IntrinsicId;
4758 switch (BuiltinID) {
4759 default:
4760 llvm_unreachable("Unknown overflow builtin id.");
4761 case Builtin::BI__builtin_add_overflow:
4762 IntrinsicId = EncompassingInfo.Signed
4763 ? llvm::Intrinsic::sadd_with_overflow
4764 : llvm::Intrinsic::uadd_with_overflow;
4765 break;
4766 case Builtin::BI__builtin_sub_overflow:
4767 IntrinsicId = EncompassingInfo.Signed
4768 ? llvm::Intrinsic::ssub_with_overflow
4769 : llvm::Intrinsic::usub_with_overflow;
4770 break;
4771 case Builtin::BI__builtin_mul_overflow:
4772 IntrinsicId = EncompassingInfo.Signed
4773 ? llvm::Intrinsic::smul_with_overflow
4774 : llvm::Intrinsic::umul_with_overflow;
4775 break;
4776 }
4778 llvm::Value *Left = EmitScalarExpr(LeftArg);
4779 llvm::Value *Right = EmitScalarExpr(RightArg);
4780 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
4782 // Extend each operand to the encompassing type.
4783 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
4784 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
4786 // Perform the operation on the extended values.
4787 llvm::Value *Overflow, *Result;
4788 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
4790 if (EncompassingInfo.Width > ResultInfo.Width) {
4791 // The encompassing type is wider than the result type, so we need to
4792 // truncate it.
4793 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4795 // To see if the truncation caused an overflow, we will extend
4796 // the result and then compare it to the original result.
4797 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4798 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4799 llvm::Value *TruncationOverflow =
4800 Builder.CreateICmpNE(Result, ResultTruncExt);
4802 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4803 Result = ResultTrunc;
4804 }
4806 // Finally, store the result using the pointer.
4807 bool isVolatile =
4808 ResultArg->getType()->getPointeeType().isVolatileQualified();
4809 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4811 return RValue::get(Overflow);
4812 }
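// Worked example (a sketch; the exact widths follow EncompassingIntegerType):
//   int a; unsigned b; short r;
//   _Bool ovf = __builtin_add_overflow(a, b, &r);
// The encompassing type is signed and must strictly exceed the width of the
// unsigned operand, so this is expected to use llvm.sadd.with.overflow.i33,
// truncate the result to i16, and OR the truncation check into the overflow
// flag as done above.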
4814 case Builtin::BI__builtin_uadd_overflow:
4815 case Builtin::BI__builtin_uaddl_overflow:
4816 case Builtin::BI__builtin_uaddll_overflow:
4817 case Builtin::BI__builtin_usub_overflow:
4818 case Builtin::BI__builtin_usubl_overflow:
4819 case Builtin::BI__builtin_usubll_overflow:
4820 case Builtin::BI__builtin_umul_overflow:
4821 case Builtin::BI__builtin_umull_overflow:
4822 case Builtin::BI__builtin_umulll_overflow:
4823 case Builtin::BI__builtin_sadd_overflow:
4824 case Builtin::BI__builtin_saddl_overflow:
4825 case Builtin::BI__builtin_saddll_overflow:
4826 case Builtin::BI__builtin_ssub_overflow:
4827 case Builtin::BI__builtin_ssubl_overflow:
4828 case Builtin::BI__builtin_ssubll_overflow:
4829 case Builtin::BI__builtin_smul_overflow:
4830 case Builtin::BI__builtin_smull_overflow:
4831 case Builtin::BI__builtin_smulll_overflow: {
4833 // We translate all of these builtins directly to the relevant LLVM IR intrinsic.
4835 // Scalarize our inputs.
4836 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4837 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4838 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4840 // Decide which of the overflow intrinsics we are lowering to:
4841 llvm::Intrinsic::ID IntrinsicId;
4842 switch (BuiltinID) {
4843 default: llvm_unreachable("Unknown overflow builtin id.");
4844 case Builtin::BI__builtin_uadd_overflow:
4845 case Builtin::BI__builtin_uaddl_overflow:
4846 case Builtin::BI__builtin_uaddll_overflow:
4847 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4848 break;
4849 case Builtin::BI__builtin_usub_overflow:
4850 case Builtin::BI__builtin_usubl_overflow:
4851 case Builtin::BI__builtin_usubll_overflow:
4852 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4853 break;
4854 case Builtin::BI__builtin_umul_overflow:
4855 case Builtin::BI__builtin_umull_overflow:
4856 case Builtin::BI__builtin_umulll_overflow:
4857 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4858 break;
4859 case Builtin::BI__builtin_sadd_overflow:
4860 case Builtin::BI__builtin_saddl_overflow:
4861 case Builtin::BI__builtin_saddll_overflow:
4862 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4863 break;
4864 case Builtin::BI__builtin_ssub_overflow:
4865 case Builtin::BI__builtin_ssubl_overflow:
4866 case Builtin::BI__builtin_ssubll_overflow:
4867 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4868 break;
4869 case Builtin::BI__builtin_smul_overflow:
4870 case Builtin::BI__builtin_smull_overflow:
4871 case Builtin::BI__builtin_smulll_overflow:
4872 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4873 break;
4874 }
4877 llvm::Value *Carry;
4878 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4879 Builder.CreateStore(Sum, SumOutPtr);
4881 return RValue::get(Carry);
4882 }
4883 case Builtin::BIaddressof:
4884 case Builtin::BI__addressof:
4885 case Builtin::BI__builtin_addressof:
4886 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4887 case Builtin::BI__builtin_function_start:
4888 return RValue::get(CGM.GetFunctionStart(
4889 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
4890 case Builtin::BI__builtin_operator_new:
4891 return EmitBuiltinNewDeleteCall(
4892 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4893 case Builtin::BI__builtin_operator_delete:
4894 EmitBuiltinNewDeleteCall(
4895 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4896 return RValue::get(nullptr);
4898 case Builtin::BI__builtin_is_aligned:
4899 return EmitBuiltinIsAligned(E);
4900 case Builtin::BI__builtin_align_up:
4901 return EmitBuiltinAlignTo(E, true);
4902 case Builtin::BI__builtin_align_down:
4903 return EmitBuiltinAlignTo(E, false);
4905 case Builtin::BI__noop:
4906 // __noop always evaluates to an integer literal zero.
4907 return RValue::get(ConstantInt::get(IntTy, 0));
4908 case Builtin::BI__builtin_call_with_static_chain: {
4909 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4910 const Expr *Chain = E->getArg(1);
4911 return EmitCall(Call->getCallee()->getType(),
4912 EmitCallee(Call->getCallee()), Call, ReturnValue,
4913 EmitScalarExpr(Chain));
4914 }
4915 case Builtin::BI_InterlockedExchange8:
4916 case Builtin::BI_InterlockedExchange16:
4917 case Builtin::BI_InterlockedExchange:
4918 case Builtin::BI_InterlockedExchangePointer:
4919 return RValue::get(
4920 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4921 case Builtin::BI_InterlockedCompareExchangePointer:
4922 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4923 llvm::Type *RTy;
4924 llvm::IntegerType *IntType = IntegerType::get(
4925 getLLVMContext(), getContext().getTypeSize(E->getType()));
4927 llvm::Value *Destination = EmitScalarExpr(E->getArg(0));
4929 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4930 RTy = Exchange->getType();
4931 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4933 llvm::Value *Comparand =
4934 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4936 auto Ordering =
4937 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4938 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4940 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4941 Ordering, Ordering);
4942 Result->setVolatile(true);
4944 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
4945 0),
4946 RTy));
4947 }
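// Note: the pointer operands are converted to pointer-width integers, a
// volatile cmpxchg is emitted (seq_cst, or monotonic for the _nf variant),
// and element 0 of the result pair -- the previously stored value -- is
// converted back to a pointer for the return value.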
4948 case Builtin::BI_InterlockedCompareExchange8:
4949 case Builtin::BI_InterlockedCompareExchange16:
4950 case Builtin::BI_InterlockedCompareExchange:
4951 case Builtin::BI_InterlockedCompareExchange64:
4952 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4953 case Builtin::BI_InterlockedIncrement16:
4954 case Builtin::BI_InterlockedIncrement:
4955 return RValue::get(
4956 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4957 case Builtin::BI_InterlockedDecrement16:
4958 case Builtin::BI_InterlockedDecrement:
4959 return RValue::get(
4960 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4961 case Builtin::BI_InterlockedAnd8:
4962 case Builtin::BI_InterlockedAnd16:
4963 case Builtin::BI_InterlockedAnd:
4964 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4965 case Builtin::BI_InterlockedExchangeAdd8:
4966 case Builtin::BI_InterlockedExchangeAdd16:
4967 case Builtin::BI_InterlockedExchangeAdd:
4968 return RValue::get(
4969 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4970 case Builtin::BI_InterlockedExchangeSub8:
4971 case Builtin::BI_InterlockedExchangeSub16:
4972 case Builtin::BI_InterlockedExchangeSub:
4973 return RValue::get(
4974 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4975 case Builtin::BI_InterlockedOr8:
4976 case Builtin::BI_InterlockedOr16:
4977 case Builtin::BI_InterlockedOr:
4978 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4979 case Builtin::BI_InterlockedXor8:
4980 case Builtin::BI_InterlockedXor16:
4981 case Builtin::BI_InterlockedXor:
4982 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4984 case Builtin::BI_bittest64:
4985 case Builtin::BI_bittest:
4986 case Builtin::BI_bittestandcomplement64:
4987 case Builtin::BI_bittestandcomplement:
4988 case Builtin::BI_bittestandreset64:
4989 case Builtin::BI_bittestandreset:
4990 case Builtin::BI_bittestandset64:
4991 case Builtin::BI_bittestandset:
4992 case Builtin::BI_interlockedbittestandreset:
4993 case Builtin::BI_interlockedbittestandreset64:
4994 case Builtin::BI_interlockedbittestandset64:
4995 case Builtin::BI_interlockedbittestandset:
4996 case Builtin::BI_interlockedbittestandset_acq:
4997 case Builtin::BI_interlockedbittestandset_rel:
4998 case Builtin::BI_interlockedbittestandset_nf:
4999 case Builtin::BI_interlockedbittestandreset_acq:
5000 case Builtin::BI_interlockedbittestandreset_rel:
5001 case Builtin::BI_interlockedbittestandreset_nf:
5002 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5004 // These builtins exist to emit regular volatile loads and stores not
5005 // affected by the -fms-volatile setting.
5006 case Builtin::BI__iso_volatile_load8:
5007 case Builtin::BI__iso_volatile_load16:
5008 case Builtin::BI__iso_volatile_load32:
5009 case Builtin::BI__iso_volatile_load64:
5010 return RValue::get(EmitISOVolatileLoad(*this, E));
5011 case Builtin::BI__iso_volatile_store8:
5012 case Builtin::BI__iso_volatile_store16:
5013 case Builtin::BI__iso_volatile_store32:
5014 case Builtin::BI__iso_volatile_store64:
5015 return RValue::get(EmitISOVolatileStore(*this, E));
5017 case Builtin::BI__exception_code:
5018 case Builtin::BI_exception_code:
5019 return RValue::get(EmitSEHExceptionCode());
5020 case Builtin::BI__exception_info:
5021 case Builtin::BI_exception_info:
5022 return RValue::get(EmitSEHExceptionInfo());
5023 case Builtin::BI__abnormal_termination:
5024 case Builtin::BI_abnormal_termination:
5025 return RValue::get(EmitSEHAbnormalTermination());
5026 case Builtin::BI_setjmpex:
5027 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5028 E->getArg(0)->getType()->isPointerType())
5029 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5030 break;
5031 case Builtin::BI_setjmp:
5032 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5033 E->getArg(0)->getType()->isPointerType()) {
5034 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5035 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5036 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5037 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5038 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5039 }
5040 break;
5042 // C++ std:: builtins.
5043 case Builtin::BImove:
5044 case Builtin::BImove_if_noexcept:
5045 case Builtin::BIforward:
5046 case Builtin::BIforward_like:
5047 case Builtin::BIas_const:
5048 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5049 case Builtin::BI__GetExceptionInfo: {
5050 if (llvm::GlobalVariable *GV =
5051 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5052 return RValue::get(GV);
5053 break;
5054 }
5056 case Builtin::BI__fastfail:
5057 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
5059 case Builtin::BI__builtin_coro_id:
5060 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5061 case Builtin::BI__builtin_coro_promise:
5062 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5063 case Builtin::BI__builtin_coro_resume:
5064 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5065 return RValue::get(nullptr);
5066 case Builtin::BI__builtin_coro_frame:
5067 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5068 case Builtin::BI__builtin_coro_noop:
5069 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5070 case Builtin::BI__builtin_coro_free:
5071 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5072 case Builtin::BI__builtin_coro_destroy:
5073 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5074 return RValue::get(nullptr);
5075 case Builtin::BI__builtin_coro_done:
5076 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5077 case Builtin::BI__builtin_coro_alloc:
5078 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5079 case Builtin::BI__builtin_coro_begin:
5080 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5081 case Builtin::BI__builtin_coro_end:
5082 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5083 case Builtin::BI__builtin_coro_suspend:
5084 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5085 case Builtin::BI__builtin_coro_size:
5086 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5087 case Builtin::BI__builtin_coro_align:
5088 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5090 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5091 case Builtin::BIread_pipe:
5092 case Builtin::BIwrite_pipe: {
5093 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5094 *Arg1 = EmitScalarExpr(E->getArg(1));
5095 CGOpenCLRuntime OpenCLRT(CGM);
5096 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5097 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5099 // Type of the generic packet parameter.
5100 unsigned GenericAS =
5101 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5102 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5104 // Testing which overloaded version we should generate the call for.
5105 if (2U == E->getNumArgs()) {
5106 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5107 : "__write_pipe_2";
5108 // Creating a generic function type to be able to call with any builtin or
5109 // user defined type.
5110 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5111 llvm::FunctionType *FTy = llvm::FunctionType::get(
5112 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5113 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
5114 return RValue::get(
5115 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5116 {Arg0, BCast, PacketSize, PacketAlign}));
5117 } else {
5118 assert(4 == E->getNumArgs() &&
5119 "Illegal number of parameters to pipe function");
5120 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5121 : "__write_pipe_4";
5123 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5124 Int32Ty, Int32Ty};
5125 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5126 *Arg3 = EmitScalarExpr(E->getArg(3));
5127 llvm::FunctionType *FTy = llvm::FunctionType::get(
5128 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5129 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
5130 // We know the third argument is an integer type, but we may need to cast
5131 // it to i32.
5132 if (Arg2->getType() != Int32Ty)
5133 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5134 return RValue::get(
5135 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5136 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
5137 }
5138 }
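// Summary of the mapping above (illustrative):
//   read_pipe(p, &val);              // 2 args -> __read_pipe_2(pipe, ptr, size, align)
//   read_pipe(p, rid, idx, &val);    // 4 args -> __read_pipe_4(pipe, rid, idx, ptr, size, align)
// and likewise for write_pipe with __write_pipe_2 / __write_pipe_4.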
5139 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5140 // functions
5141 case Builtin::BIreserve_read_pipe:
5142 case Builtin::BIreserve_write_pipe:
5143 case Builtin::BIwork_group_reserve_read_pipe:
5144 case Builtin::BIwork_group_reserve_write_pipe:
5145 case Builtin::BIsub_group_reserve_read_pipe:
5146 case Builtin::BIsub_group_reserve_write_pipe: {
5147 // Composing the mangled name for the function.
5148 const char *Name;
5149 if (BuiltinID == Builtin::BIreserve_read_pipe)
5150 Name = "__reserve_read_pipe";
5151 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5152 Name = "__reserve_write_pipe";
5153 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5154 Name = "__work_group_reserve_read_pipe";
5155 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5156 Name = "__work_group_reserve_write_pipe";
5157 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5158 Name = "__sub_group_reserve_read_pipe";
5159 else
5160 Name = "__sub_group_reserve_write_pipe";
5162 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5163 *Arg1 = EmitScalarExpr(E->getArg(1));
5164 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5165 CGOpenCLRuntime OpenCLRT(CGM);
5166 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5167 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5169 // Building the generic function prototype.
5170 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5171 llvm::FunctionType *FTy = llvm::FunctionType::get(
5172 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5173 // We know the second argument is an integer type, but we may need to cast
5174 // it to i32.
5175 if (Arg1->getType() != Int32Ty)
5176 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5177 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5178 {Arg0, Arg1, PacketSize, PacketAlign}));
5179 }
5180 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5181 // functions
5182 case Builtin::BIcommit_read_pipe:
5183 case Builtin::BIcommit_write_pipe:
5184 case Builtin::BIwork_group_commit_read_pipe:
5185 case Builtin::BIwork_group_commit_write_pipe:
5186 case Builtin::BIsub_group_commit_read_pipe:
5187 case Builtin::BIsub_group_commit_write_pipe: {
5188 const char *Name;
5189 if (BuiltinID == Builtin::BIcommit_read_pipe)
5190 Name = "__commit_read_pipe";
5191 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5192 Name = "__commit_write_pipe";
5193 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5194 Name = "__work_group_commit_read_pipe";
5195 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5196 Name = "__work_group_commit_write_pipe";
5197 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5198 Name = "__sub_group_commit_read_pipe";
5199 else
5200 Name = "__sub_group_commit_write_pipe";
5202 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5203 *Arg1 = EmitScalarExpr(E->getArg(1));
5204 CGOpenCLRuntime OpenCLRT(CGM);
5205 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5206 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5208 // Building the generic function prototype.
5209 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5210 llvm::FunctionType *FTy =
5211 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
5212 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5214 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5215 {Arg0, Arg1, PacketSize, PacketAlign}));
5216 }
5217 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5218 case Builtin::BIget_pipe_num_packets:
5219 case Builtin::BIget_pipe_max_packets: {
5220 const char *BaseName;
5221 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5222 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5223 BaseName = "__get_pipe_num_packets";
5224 else
5225 BaseName = "__get_pipe_max_packets";
5226 std::string Name = std::string(BaseName) +
5227 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5229 // Building the generic function prototype.
5230 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5231 CGOpenCLRuntime OpenCLRT(CGM);
5232 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5233 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5234 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5235 llvm::FunctionType *FTy = llvm::FunctionType::get(
5236 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5238 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5239 {Arg0, PacketSize, PacketAlign}));
5240 }
5242 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5243 case Builtin::BIto_global:
5244 case Builtin::BIto_local:
5245 case Builtin::BIto_private: {
5246 auto Arg0 = EmitScalarExpr(E->getArg(0));
5247 auto NewArgT = llvm::PointerType::get(
5248 getLLVMContext(),
5249 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5250 auto NewRetT = llvm::PointerType::get(
5251 getLLVMContext(),
5252 CGM.getContext().getTargetAddressSpace(
5253 E->getType()->getPointeeType().getAddressSpace()));
5254 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5255 llvm::Value *NewArg;
5256 if (Arg0->getType()->getPointerAddressSpace() !=
5257 NewArgT->getPointerAddressSpace())
5258 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5259 else
5260 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5261 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5262 auto NewCall =
5263 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5264 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5265 ConvertType(E->getType())));
5266 }
5268 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5269 // It contains four different overload formats specified in Table 6.13.17.1.
5270 case Builtin::BIenqueue_kernel: {
5271 StringRef Name; // Generated function call name
5272 unsigned NumArgs = E->getNumArgs();
5274 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
5275 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5276 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5278 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
5279 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
5280 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
5281 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
5282 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
5284 if (NumArgs == 4) {
5285 // The most basic form of the call with parameters:
5286 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
5287 Name = "__enqueue_kernel_basic";
5288 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
5289 GenericVoidPtrTy};
5290 llvm::FunctionType *FTy = llvm::FunctionType::get(
5291 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5293 auto Info =
5294 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5295 llvm::Value *Kernel =
5296 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5297 llvm::Value *Block =
5298 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5300 AttrBuilder B(Builder.getContext());
5301 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
5302 llvm::AttributeList ByValAttrSet =
5303 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
5305 auto RTCall =
5306 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
5307 {Queue, Flags, Range, Kernel, Block});
5308 RTCall->setAttributes(ByValAttrSet);
5309 return RValue::get(RTCall);
5310 }
5311 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
5313 // Create a temporary array to hold the sizes of local pointer arguments
5314 // for the block. \p First is the position of the first size argument.
5315 auto CreateArrayForSizeVar = [=](unsigned First)
5316 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
5317 llvm::APInt ArraySize(32, NumArgs - First);
5318 QualType SizeArrayTy = getContext().getConstantArrayType(
5319 getContext().getSizeType(), ArraySize, nullptr,
5320 ArraySizeModifier::Normal,
5321 /*IndexTypeQuals=*/0);
5322 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
5323 llvm::Value *TmpPtr = Tmp.getPointer();
5324 llvm::Value *TmpSize = EmitLifetimeStart(
5325 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
5326 llvm::Value *ElemPtr;
5327 // Each of the following arguments specifies the size of the corresponding
5328 // argument passed to the enqueued block.
5329 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
5330 for (unsigned I = First; I < NumArgs; ++I) {
5331 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
5332 auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
5333 {Zero, Index});
5334 if (I == First)
5335 ElemPtr = GEP;
5336 auto *V =
5337 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
5338 Builder.CreateAlignedStore(
5339 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
5340 }
5341 return std::tie(ElemPtr, TmpSize, TmpPtr);
5342 };
5344 // Could have events and/or varargs.
5345 if (E->getArg(3)->getType()->isBlockPointerType()) {
5346 // No events passed, but has variadic arguments.
5347 Name = "__enqueue_kernel_varargs";
5348 auto Info =
5349 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5350 llvm::Value *Kernel =
5351 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5352 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5353 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5354 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
5356 // Create a vector of the arguments, as well as a constant value to
5357 // express to the runtime the number of variadic arguments.
5358 llvm::Value *const Args[] = {Queue, Flags,
5359 Range, Kernel,
5360 Block, ConstantInt::get(IntTy, NumArgs - 4),
5361 ElemPtr};
5362 llvm::Type *const ArgTys[] = {
5363 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
5364 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
5366 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5367 auto Call = RValue::get(
5368 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
5369 if (TmpSize)
5370 EmitLifetimeEnd(TmpSize, TmpPtr);
5371 return Call;
5372 }
5373 // All remaining overloads take event arguments.
5374 if (NumArgs >= 7) {
5375 llvm::PointerType *PtrTy = llvm::PointerType::get(
5376 CGM.getLLVMContext(),
5377 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5379 llvm::Value *NumEvents =
5380 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
5382 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments
5383 // to be null pointer constants (including a `0` literal), we can detect
5384 // that case and emit a null pointer directly.
5385 llvm::Value *EventWaitList = nullptr;
5386 if (E->getArg(4)->isNullPointerConstant(
5387 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
5388 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
5389 } else {
5390 EventWaitList = E->getArg(4)->getType()->isArrayType()
5391 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
5392 : EmitScalarExpr(E->getArg(4));
5393 // Convert to generic address space.
5394 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
5395 }
5396 llvm::Value *EventRet = nullptr;
5397 if (E->getArg(5)->isNullPointerConstant(
5398 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
5399 EventRet = llvm::ConstantPointerNull::get(PtrTy);
5400 } else {
5401 EventRet =
5402 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
5403 }
5405 auto Info =
5406 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
5407 llvm::Value *Kernel =
5408 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5409 llvm::Value *Block =
5410 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5412 std::vector<llvm::Type *> ArgTys = {
5413 QueueTy, Int32Ty, RangeTy, Int32Ty,
5414 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
5416 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
5417 NumEvents, EventWaitList, EventRet,
5418 Kernel, Block};
5420 if (NumArgs == 7) {
5421 // Has events but no variadics.
5422 Name = "__enqueue_kernel_basic_events";
5423 llvm::FunctionType *FTy = llvm::FunctionType::get(
5424 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5425 return RValue::get(
5426 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5427 llvm::ArrayRef<llvm::Value *>(Args)));
5428 }
5429 // Has event info and variadics
5430 // Pass the number of variadics to the runtime function too.
5431 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
5432 ArgTys.push_back(Int32Ty);
5433 Name = "__enqueue_kernel_events_varargs";
5435 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5436 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
5437 Args.push_back(ElemPtr);
5438 ArgTys.push_back(ElemPtr->getType());
5440 llvm::FunctionType *FTy = llvm::FunctionType::get(
5441 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5442 auto Call =
5443 RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5444 llvm::ArrayRef<llvm::Value *>(Args)));
5445 if (TmpSize)
5446 EmitLifetimeEnd(TmpSize, TmpPtr);
5447 return Call;
5448 }
5449 [[fallthrough]];
5450 }
5451 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
5452 // parameter.
5453 case Builtin::BIget_kernel_work_group_size: {
5454 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5455 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5456 auto Info =
5457 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5458 Value *Kernel =
5459 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5460 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5461 return RValue::get(EmitRuntimeCall(
5462 CGM.CreateRuntimeFunction(
5463 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5464 false),
5465 "__get_kernel_work_group_size_impl"),
5466 {Kernel, Arg}));
5467 }
5468 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
5469 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5470 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5471 auto Info =
5472 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5473 Value *Kernel =
5474 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5475 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5476 return RValue::get(EmitRuntimeCall(
5477 CGM.CreateRuntimeFunction(
5478 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5479 false),
5480 "__get_kernel_preferred_work_group_size_multiple_impl"),
5481 {Kernel, Arg}));
5482 }
5483 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
5484 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
5485 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5486 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5487 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
5488 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
5489 auto Info =
5490 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
5491 Value *Kernel =
5492 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5493 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5494 const char *Name =
5495 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
5496 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
5497 : "__get_kernel_sub_group_count_for_ndrange_impl";
5498 return RValue::get(EmitRuntimeCall(
5499 CGM.CreateRuntimeFunction(
5500 llvm::FunctionType::get(
5501 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
5502 false),
5503 Name),
5504 {NDRange, Kernel, Block}));
5505 }
5507 case Builtin::BI__builtin_store_half:
5508 case Builtin::BI__builtin_store_halff: {
5509 Value *Val = EmitScalarExpr(E->getArg(0));
5510 Address Address = EmitPointerWithAlignment(E->getArg(1));
5511 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
5512 Builder.CreateStore(HalfVal, Address);
5513 return RValue::get(nullptr);
5514 }
5515 case Builtin::BI__builtin_load_half: {
5516 Address Address = EmitPointerWithAlignment(E->getArg(0));
5517 Value *HalfVal = Builder.CreateLoad(Address);
5518 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
5519 }
5520 case Builtin::BI__builtin_load_halff: {
5521 Address Address = EmitPointerWithAlignment(E->getArg(0));
5522 Value *HalfVal = Builder.CreateLoad(Address);
5523 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
5524 }
5525 case Builtin::BIprintf:
5526 if (getTarget().getTriple().isNVPTX() ||
5527 getTarget().getTriple().isAMDGCN()) {
5528 if (getLangOpts().OpenMPIsTargetDevice)
5529 return EmitOpenMPDevicePrintfCallExpr(E);
5530 if (getTarget().getTriple().isNVPTX())
5531 return EmitNVPTXDevicePrintfCallExpr(E);
5532 if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
5533 return EmitAMDGPUDevicePrintfCallExpr(E);
5534 }
5536 break;
5537 case Builtin::BI__builtin_canonicalize:
5538 case Builtin::BI__builtin_canonicalizef:
5539 case Builtin::BI__builtin_canonicalizef16:
5540 case Builtin::BI__builtin_canonicalizel:
5541 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
5543 case Builtin::BI__builtin_thread_pointer: {
5544 if (!getContext().getTargetInfo().isTLSSupported())
5545 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
5546 // Fall through - it's already mapped to the intrinsic by ClangBuiltin.
5547 break;
5548 }
5549 case Builtin::BI__builtin_os_log_format:
5550 return emitBuiltinOSLogFormat(*E);
5552 case Builtin::BI__xray_customevent: {
5553 if (!ShouldXRayInstrumentFunction())
5554 return RValue::getIgnored();
5556 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5557 XRayInstrKind::Custom))
5558 return RValue::getIgnored();
5560 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5561 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
5562 return RValue::getIgnored();
5564 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
5565 auto FTy = F->getFunctionType();
5566 auto Arg0 = E->getArg(0);
5567 auto Arg0Val = EmitScalarExpr(Arg0);
5568 auto Arg0Ty = Arg0->getType();
5569 auto PTy0 = FTy->getParamType(0);
5570 if (PTy0 != Arg0Val->getType()) {
5571 if (Arg0Ty->isArrayType())
5572 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
5573 else
5574 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
5576 auto Arg1 = EmitScalarExpr(E->getArg(1));
5577 auto PTy1 = FTy->getParamType(1);
5578 if (PTy1 != Arg1->getType())
5579 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
5580 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
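// For example, when the first argument is a local `char Buf[N]`, the
// array-typed argument decays to a pointer above instead of being bitcast,
// and the length argument is truncated or bitcast to whatever parameter
// type the xray.customevent intrinsic declares.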
5583 case Builtin::BI__xray_typedevent: {
5584 // TODO: There should be a way to always emit events even if the current
5585 // function is not instrumented. Losing events in a stream can cripple
5586 // a trace.
5587 if (!ShouldXRayInstrumentFunction())
5588 return RValue::getIgnored();
5590 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5591 XRayInstrKind::Typed))
5592 return RValue::getIgnored();
5594 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5595 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
5596 return RValue::getIgnored();
5598 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
5599 auto FTy = F->getFunctionType();
5600 auto Arg0 = EmitScalarExpr(E->getArg(0));
5601 auto PTy0 = FTy->getParamType(0);
5602 if (PTy0 != Arg0->getType())
5603 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
5604 auto Arg1 = E->getArg(1);
5605 auto Arg1Val = EmitScalarExpr(Arg1);
5606 auto Arg1Ty = Arg1->getType();
5607 auto PTy1 = FTy->getParamType(1);
5608 if (PTy1 != Arg1Val->getType()) {
5609 if (Arg1Ty->isArrayType())
5610 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
5611 else
5612 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
5614 auto Arg2 = EmitScalarExpr(E->getArg(2));
5615 auto PTy2 = FTy->getParamType(2);
5616 if (PTy2 != Arg2->getType())
5617 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
5618 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
5621 case Builtin::BI__builtin_ms_va_start:
5622 case Builtin::BI__builtin_ms_va_end:
5623 return RValue::get(
5624 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
5625 BuiltinID == Builtin::BI__builtin_ms_va_start));
5627 case Builtin::BI__builtin_ms_va_copy: {
5628 // Lower this manually. We can't reliably determine whether or not any
5629 // given va_copy() is for a Win64 va_list from the calling convention
5630 // alone, because it's legal to do this from a System V ABI function.
5631 // With opaque pointer types, we won't have enough information in LLVM
5632 // IR to determine this from the argument types, either. Best to do it
5633 // now, while we have enough information.
5634 Address DestAddr = EmitMSVAListRef(E->getArg(0));
5635 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
5637 llvm::Type *BPP = Int8PtrPtrTy;
5639 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
5640 Int8PtrTy, DestAddr.getAlignment());
5641 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
5642 Int8PtrTy, SrcAddr.getAlignment());
5644 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
5645 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
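// Net effect: __builtin_ms_va_copy(dst, src) becomes a single pointer copy,
// loading the current argument pointer out of `src` and storing it into
// `dst`, since a Win64 va_list is effectively a char*.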
5648 case Builtin::BI__builtin_get_device_side_mangled_name: {
5649 auto Name = CGM.getCUDARuntime().getDeviceSideName(
5650 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
5651 auto Str = CGM.GetAddrOfConstantCString(Name, "");
5652 llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
5653 llvm::ConstantInt::get(SizeTy, 0)};
5654 auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
5655 Str.getPointer(), Zeros);
5656 return RValue::get(Ptr);
5660 // If this is an alias for a lib function (e.g. __builtin_sin), emit
5661 // the call using the normal call path, but using the unmangled
5662 // version of the function name.
5663 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
5664 return emitLibraryCall(*this, FD, E,
5665 CGM.getBuiltinLibFunction(FD, BuiltinID));
5667 // If this is a predefined lib function (e.g. malloc), emit the call
5668 // using exactly the normal call path.
5669 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
5670 return emitLibraryCall(*this, FD, E,
5671 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
5673 // Check that a call to a target specific builtin has the correct target
5674 // features.
5675   // This check is done down here so that non-target-specific builtins are
5676   // not affected; if generic builtins ever start to require generic target
5677   // features, it can be moved up to the beginning of the function.
5678 checkTargetFeatures(E, FD);
5680 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
5681 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
5683 // See if we have a target specific intrinsic.
5684 StringRef Name = getContext().BuiltinInfo.getName(BuiltinID);
5685 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
5686 StringRef Prefix =
5687 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
5688 if (!Prefix.empty()) {
5689 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
5690     // NOTE: we don't need to perform a compatibility flag check here, since
5691     // the MS builtins are declared in Builtins*.def via LANGBUILTIN with
5692     // ALL_MS_LANGUAGES and have therefore already been filtered out earlier.
5693 if (IntrinsicID == Intrinsic::not_intrinsic)
5694 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
5697 if (IntrinsicID != Intrinsic::not_intrinsic) {
5698 SmallVector<Value*, 16> Args;
5700 // Find out if any arguments are required to be integer constant
5701 // expressions.
5702 unsigned ICEArguments = 0;
5703 ASTContext::GetBuiltinTypeError Error;
5704 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5705 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5707 Function *F = CGM.getIntrinsic(IntrinsicID);
5708 llvm::FunctionType *FTy = F->getFunctionType();
5710 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5711 Value *ArgValue;
5712 // If this is a normal argument, just emit it as a scalar.
5713 if ((ICEArguments & (1 << i)) == 0) {
5714 ArgValue = EmitScalarExpr(E->getArg(i));
5715 } else {
5716 // If this is required to be a constant, constant fold it so that we
5717 // know that the generated intrinsic gets a ConstantInt.
5718 ArgValue = llvm::ConstantInt::get(
5719 getLLVMContext(),
5720 *E->getArg(i)->getIntegerConstantExpr(getContext()));
5723 // If the intrinsic arg type is different from the builtin arg type
5724 // we need to do a bit cast.
5725 llvm::Type *PTy = FTy->getParamType(i);
5726 if (PTy != ArgValue->getType()) {
5727 // XXX - vector of pointers?
5728 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5729 if (PtrTy->getAddressSpace() !=
5730 ArgValue->getType()->getPointerAddressSpace()) {
5731 ArgValue = Builder.CreateAddrSpaceCast(
5732 ArgValue, llvm::PointerType::get(getLLVMContext(),
5733 PtrTy->getAddressSpace()));
5737 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
5738 "Must be able to losslessly bit cast to param");
5739       // Cast the vector type (e.g., v256i32) to x86_amx; this only happens
5740       // for AMX intrinsics.
5741 if (PTy->isX86_AMXTy())
5742 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
5743 {ArgValue->getType()}, {ArgValue});
5744 else
5745 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5748 Args.push_back(ArgValue);
5751 Value *V = Builder.CreateCall(F, Args);
5752 QualType BuiltinRetType = E->getType();
5754 llvm::Type *RetTy = VoidTy;
5755 if (!BuiltinRetType->isVoidType())
5756 RetTy = ConvertType(BuiltinRetType);
5758 if (RetTy != V->getType()) {
5759 // XXX - vector of pointers?
5760 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5761 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5762 V = Builder.CreateAddrSpaceCast(
5763 V, llvm::PointerType::get(getLLVMContext(),
5764 PtrTy->getAddressSpace()));
5768 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5769 "Must be able to losslessly bit cast result type");
5770     // Cast x86_amx back to a vector type (e.g., v256i32); this only happens
5771     // for AMX intrinsics.
5772 if (V->getType()->isX86_AMXTy())
5773 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
5774 {V});
5775 else
5776 V = Builder.CreateBitCast(V, RetTy);
5779 if (RetTy->isVoidTy())
5780 return RValue::get(nullptr);
5782 return RValue::get(V);
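// A concrete example of the casts above: if a builtin argument arrives as a
// <256 x i32> vector but the intrinsic parameter is x86_amx, the value is
// wrapped in llvm.x86.cast.vector.to.tile (and the result unwrapped with
// llvm.x86.cast.tile.to.vector); for ordinary mismatches a lossless bitcast
// or an address-space cast is emitted instead.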
5785 // Some target-specific builtins can have aggregate return values, e.g.
5786 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5787 // ReturnValue to be non-null, so that the target-specific emission code can
5788 // always just emit into it.
5789 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5790 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5791 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5792 ReturnValue = ReturnValueSlot(DestPtr, false);
5795 // Now see if we can emit a target-specific builtin.
5796 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5797 switch (EvalKind) {
5798 case TEK_Scalar:
5799 if (V->getType()->isVoidTy())
5800 return RValue::get(nullptr);
5801 return RValue::get(V);
5802 case TEK_Aggregate:
5803 return RValue::getAggregate(ReturnValue.getValue(),
5804 ReturnValue.isVolatile());
5805 case TEK_Complex:
5806 llvm_unreachable("No current target builtin returns complex");
5808 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5811 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
5812 return EmitHipStdParUnsupportedBuiltin(this, FD);
5814 ErrorUnsupported(E, "builtin function");
5816 // Unknown builtin, for now just dump it out and return undef.
5817 return GetUndefRValue(E->getType());
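// To summarize the fallthrough order above: library-alias and predefined
// library builtins are emitted as ordinary calls, then an intrinsic
// registered for the target's arch prefix is tried, then
// EmitTargetBuiltinExpr; only if all of those decline do we diagnose an
// unsupported builtin (or take the HIPStdPar path) and return undef.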
5820 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5821 unsigned BuiltinID, const CallExpr *E,
5822 ReturnValueSlot ReturnValue,
5823 llvm::Triple::ArchType Arch) {
5824 // When compiling in HipStdPar mode we have to be conservative in rejecting
5825 // target specific features in the FE, and defer the possible error to the
5826 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
5827 // referenced by an accelerator executable function, we emit an error.
5828 // Returning nullptr here leads to the builtin being handled in
5829 // EmitHipStdParUnsupportedBuiltin.
5830 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
5831 Arch != CGF->getTarget().getTriple().getArch())
5832 return nullptr;
5834 switch (Arch) {
5835 case llvm::Triple::arm:
5836 case llvm::Triple::armeb:
5837 case llvm::Triple::thumb:
5838 case llvm::Triple::thumbeb:
5839 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5840 case llvm::Triple::aarch64:
5841 case llvm::Triple::aarch64_32:
5842 case llvm::Triple::aarch64_be:
5843 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5844 case llvm::Triple::bpfeb:
5845 case llvm::Triple::bpfel:
5846 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5847 case llvm::Triple::x86:
5848 case llvm::Triple::x86_64:
5849 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5850 case llvm::Triple::ppc:
5851 case llvm::Triple::ppcle:
5852 case llvm::Triple::ppc64:
5853 case llvm::Triple::ppc64le:
5854 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5855 case llvm::Triple::r600:
5856 case llvm::Triple::amdgcn:
5857 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5858 case llvm::Triple::systemz:
5859 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5860 case llvm::Triple::nvptx:
5861 case llvm::Triple::nvptx64:
5862 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5863 case llvm::Triple::wasm32:
5864 case llvm::Triple::wasm64:
5865 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5866 case llvm::Triple::hexagon:
5867 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5868 case llvm::Triple::riscv32:
5869 case llvm::Triple::riscv64:
5870 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
5871 default:
5872 return nullptr;
5876 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5877 const CallExpr *E,
5878 ReturnValueSlot ReturnValue) {
5879 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5880 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5881 return EmitTargetArchBuiltinExpr(
5882 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5883 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5886 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5887 getTarget().getTriple().getArch());
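// A typical use of the aux path: during offload device compilation the host
// is the aux target, so a builtin that only exists on the host is remapped
// via getAuxBuiltinID and dispatched on the host architecture rather than
// on the device triple.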
5890 static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5891 NeonTypeFlags TypeFlags,
5892 bool HasLegalHalfType = true,
5893 bool V1Ty = false,
5894 bool AllowBFloatArgsAndRet = true) {
5895 int IsQuad = TypeFlags.isQuad();
5896 switch (TypeFlags.getEltType()) {
5897 case NeonTypeFlags::Int8:
5898 case NeonTypeFlags::Poly8:
5899 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5900 case NeonTypeFlags::Int16:
5901 case NeonTypeFlags::Poly16:
5902 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5903 case NeonTypeFlags::BFloat16:
5904 if (AllowBFloatArgsAndRet)
5905 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5906 else
5907 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5908 case NeonTypeFlags::Float16:
5909 if (HasLegalHalfType)
5910 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5911 else
5912 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5913 case NeonTypeFlags::Int32:
5914 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5915 case NeonTypeFlags::Int64:
5916 case NeonTypeFlags::Poly64:
5917 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5918 case NeonTypeFlags::Poly128:
5919     // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
5920     // much of the i128 and f128 API is still missing,
5921     // so we use v16i8 to represent poly128 and rely on pattern matching.
5922 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5923 case NeonTypeFlags::Float32:
5924 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5925 case NeonTypeFlags::Float64:
5926 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5928 llvm_unreachable("Unknown vector element type!");
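// Example: NeonTypeFlags with element type Int32 yields <2 x i32> for a
// 64-bit ("D") register and <4 x i32> (2 << IsQuad) for a 128-bit ("Q")
// register; when V1Ty is set, a single-element vector is returned instead.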
5931 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5932 NeonTypeFlags IntTypeFlags) {
5933 int IsQuad = IntTypeFlags.isQuad();
5934 switch (IntTypeFlags.getEltType()) {
5935 case NeonTypeFlags::Int16:
5936 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5937 case NeonTypeFlags::Int32:
5938 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5939 case NeonTypeFlags::Int64:
5940 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5941 default:
5942 llvm_unreachable("Type can't be converted to floating-point!");
5946 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5947 const ElementCount &Count) {
5948 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5949 return Builder.CreateShuffleVector(V, V, SV, "lane");
5952 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5953 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5954 return EmitNeonSplat(V, C, EC);
5957 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5958 const char *name,
5959 unsigned shift, bool rightshift) {
5960 unsigned j = 0;
5961 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5962 ai != ae; ++ai, ++j) {
5963 if (F->isConstrainedFPIntrinsic())
5964 if (ai->getType()->isMetadataTy())
5965 continue;
5966 if (shift > 0 && shift == j)
5967 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5968 else
5969 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5972 if (F->isConstrainedFPIntrinsic())
5973 return Builder.CreateConstrainedFPCall(F, Ops, name);
5974 else
5975 return Builder.CreateCall(F, Ops, name);
5978 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5979 bool neg) {
5980 int SV = cast<ConstantInt>(V)->getSExtValue();
5981 return ConstantInt::get(Ty, neg ? -SV : SV);
5984 // Right-shift a vector by a constant.
5985 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5986 llvm::Type *Ty, bool usgn,
5987 const char *name) {
5988 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5990 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5991 int EltSize = VTy->getScalarSizeInBits();
5993 Vec = Builder.CreateBitCast(Vec, Ty);
5995 // lshr/ashr are undefined when the shift amount is equal to the vector
5996 // element size.
5997 if (ShiftAmt == EltSize) {
5998 if (usgn) {
5999 // Right-shifting an unsigned value by its size yields 0.
6000 return llvm::ConstantAggregateZero::get(VTy);
6001 } else {
6002 // Right-shifting a signed value by its size is equivalent
6003 // to a shift of size-1.
6004 --ShiftAmt;
6005 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
6009 Shift = EmitNeonShiftVector(Shift, Ty, false);
6010 if (usgn)
6011 return Builder.CreateLShr(Vec, Shift, name);
6012 else
6013 return Builder.CreateAShr(Vec, Shift, name);
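// Example of the edge case handled above: a signed right shift of a v4i32
// lane by 32 is emitted as an ashr by 31, preserving the sign-fill
// semantics, while the unsigned form folds directly to a zero vector.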
6016 enum {
6017 AddRetType = (1 << 0),
6018 Add1ArgType = (1 << 1),
6019 Add2ArgTypes = (1 << 2),
6021 VectorizeRetType = (1 << 3),
6022 VectorizeArgTypes = (1 << 4),
6024 InventFloatType = (1 << 5),
6025 UnsignedAlts = (1 << 6),
6027 Use64BitVectors = (1 << 7),
6028 Use128BitVectors = (1 << 8),
6030 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
6031 VectorRet = AddRetType | VectorizeRetType,
6032 VectorRetGetArgs01 =
6033 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
6034 FpCmpzModifiers =
6035       AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
6036 };
6038 namespace {
6039 struct ARMVectorIntrinsicInfo {
6040 const char *NameHint;
6041 unsigned BuiltinID;
6042 unsigned LLVMIntrinsic;
6043 unsigned AltLLVMIntrinsic;
6044 uint64_t TypeModifier;
6046 bool operator<(unsigned RHSBuiltinID) const {
6047 return BuiltinID < RHSBuiltinID;
6049 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
6050 return BuiltinID < TE.BuiltinID;
6053 } // end anonymous namespace
6055 #define NEONMAP0(NameBase) \
6056 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
6058 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6059 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
6060 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
6062 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
6063 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
6064 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
6065 TypeModifier }
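// For reference, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 },
// i.e. a table entry with no alternate intrinsic and no type modifier.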
6067 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
6068 NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
6069 NEONMAP0(splat_lane_v),
6070 NEONMAP0(splat_laneq_v),
6071 NEONMAP0(splatq_lane_v),
6072 NEONMAP0(splatq_laneq_v),
6073 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
6074 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
6075 NEONMAP1(vabs_v, arm_neon_vabs, 0),
6076 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
6077 NEONMAP0(vadd_v),
6078 NEONMAP0(vaddhn_v),
6079 NEONMAP0(vaddq_v),
6080 NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
6081 NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
6082 NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
6083 NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
6084 NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
6085 NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
6086 NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
6087 NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
6088 NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
6089 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
6090 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
6091 NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
6092 NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
6093 NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
6094 NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
6095 NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
6096 NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
6097 NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType),
6098 NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
6099 NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
6100 NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType),
6101 NEONMAP1(vcage_v, arm_neon_vacge, 0),
6102 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
6103 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
6104 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
6105 NEONMAP1(vcale_v, arm_neon_vacge, 0),
6106 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
6107 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
6108 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
6109 NEONMAP0(vceqz_v),
6110 NEONMAP0(vceqzq_v),
6111 NEONMAP0(vcgez_v),
6112 NEONMAP0(vcgezq_v),
6113 NEONMAP0(vcgtz_v),
6114 NEONMAP0(vcgtzq_v),
6115 NEONMAP0(vclez_v),
6116 NEONMAP0(vclezq_v),
6117 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
6118 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
6119 NEONMAP0(vcltz_v),
6120 NEONMAP0(vcltzq_v),
6121 NEONMAP1(vclz_v, ctlz, Add1ArgType),
6122 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
6123 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
6124 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
6125 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
6126 NEONMAP0(vcvt_f16_s16),
6127 NEONMAP0(vcvt_f16_u16),
6128 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
6129 NEONMAP0(vcvt_f32_v),
6130 NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
6131 NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
6132 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
6133 NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
6134 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
6135 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
6136 NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
6137 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
6138 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
6139 NEONMAP0(vcvt_s16_f16),
6140 NEONMAP0(vcvt_s32_v),
6141 NEONMAP0(vcvt_s64_v),
6142 NEONMAP0(vcvt_u16_f16),
6143 NEONMAP0(vcvt_u32_v),
6144 NEONMAP0(vcvt_u64_v),
6145 NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
6146 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
6147 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
6148 NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
6149 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
6150 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
6151 NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
6152 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
6153 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
6154 NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
6155 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
6156 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
6157 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
6158 NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
6159 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
6160 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
6161 NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
6162 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
6163 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
6164 NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
6165 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
6166 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
6167 NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
6168 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
6169 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
6170 NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
6171 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
6172 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
6173 NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
6174 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
6175 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
6176 NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
6177 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
6178 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
6179 NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
6180 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
6181 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
6182 NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
6183 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
6184 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
6185 NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
6186 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
6187 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
6188 NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
6189 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
6190 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
6191 NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
6192 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
6193 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
6194 NEONMAP0(vcvtq_f16_s16),
6195 NEONMAP0(vcvtq_f16_u16),
6196 NEONMAP0(vcvtq_f32_v),
6197 NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
6198 NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
6199 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
6200 NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
6201 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
6202 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
6203 NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
6204 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
6205 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
6206 NEONMAP0(vcvtq_s16_f16),
6207 NEONMAP0(vcvtq_s32_v),
6208 NEONMAP0(vcvtq_s64_v),
6209 NEONMAP0(vcvtq_u16_f16),
6210 NEONMAP0(vcvtq_u32_v),
6211 NEONMAP0(vcvtq_u64_v),
6212 NEONMAP1(vdot_s32, arm_neon_sdot, 0),
6213 NEONMAP1(vdot_u32, arm_neon_udot, 0),
6214 NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
6215 NEONMAP1(vdotq_u32, arm_neon_udot, 0),
6216 NEONMAP0(vext_v),
6217 NEONMAP0(vextq_v),
6218 NEONMAP0(vfma_v),
6219 NEONMAP0(vfmaq_v),
6220 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
6221 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
6222 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
6223 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
6224 NEONMAP0(vld1_dup_v),
6225 NEONMAP1(vld1_v, arm_neon_vld1, 0),
6226 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
6227 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
6228 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
6229 NEONMAP0(vld1q_dup_v),
6230 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
6231 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
6232 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
6233 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
6234 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
6235 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
6236 NEONMAP1(vld2_v, arm_neon_vld2, 0),
6237 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
6238 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
6239 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
6240 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
6241 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
6242 NEONMAP1(vld3_v, arm_neon_vld3, 0),
6243 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
6244 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
6245 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
6246 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
6247 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
6248 NEONMAP1(vld4_v, arm_neon_vld4, 0),
6249 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
6250 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
6251 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
6252 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
6253 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
6254 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
6255 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
6256 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
6257 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
6258 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
6259 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
6260 NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
6261 NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
6262 NEONMAP0(vmovl_v),
6263 NEONMAP0(vmovn_v),
6264 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
6265 NEONMAP0(vmull_v),
6266 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
6267 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
6268 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
6269 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
6270 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
6271 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
6272 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
6273 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
6274 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
6275 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
6276 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
6277 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
6278 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
6279 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
6280 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
6281 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
6282 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
6283 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
6284 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
6285 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
6286 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
6287 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
6288 NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType),
6289 NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType),
6290 NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType),
6291 NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType),
6292 NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType),
6293 NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType),
6294 NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType),
6295 NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType),
6296 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
6297 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
6298 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
6299 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
6300 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
6301 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
6302 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
6303 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
6304 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
6305 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
6306 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
6307 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
6308 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
6309 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
6310 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
6311 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
6312 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
6313 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
6314 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
6315 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
6316 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
6317 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
6318 NEONMAP0(vrndi_v),
6319 NEONMAP0(vrndiq_v),
6320 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
6321 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
6322 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
6323 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
6324 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
6325 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
6326 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
6327 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
6328 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
6329 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
6330 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
6331 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
6332 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
6333 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
6334 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
6335 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
6336 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
6337 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
6338 NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
6339 NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
6340 NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
6341 NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
6342 NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
6343 NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
6344 NEONMAP0(vshl_n_v),
6345 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
6346 NEONMAP0(vshll_n_v),
6347 NEONMAP0(vshlq_n_v),
6348 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
6349 NEONMAP0(vshr_n_v),
6350 NEONMAP0(vshrn_n_v),
6351 NEONMAP0(vshrq_n_v),
6352 NEONMAP1(vst1_v, arm_neon_vst1, 0),
6353 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
6354 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
6355 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
6356 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
6357 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
6358 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
6359 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
6360 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
6361 NEONMAP1(vst2_v, arm_neon_vst2, 0),
6362 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
6363 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
6364 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
6365 NEONMAP1(vst3_v, arm_neon_vst3, 0),
6366 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
6367 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
6368 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
6369 NEONMAP1(vst4_v, arm_neon_vst4, 0),
6370 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
6371 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
6372 NEONMAP0(vsubhn_v),
6373 NEONMAP0(vtrn_v),
6374 NEONMAP0(vtrnq_v),
6375 NEONMAP0(vtst_v),
6376 NEONMAP0(vtstq_v),
6377 NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
6378 NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
6379 NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
6380 NEONMAP0(vuzp_v),
6381 NEONMAP0(vuzpq_v),
6382 NEONMAP0(vzip_v),
6383   NEONMAP0(vzipq_v)
6384 };
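// These intrinsic tables are kept sorted by BuiltinID; the operator<
// overloads on ARMVectorIntrinsicInfo are there so a builtin can be looked
// up with a std::lower_bound-style binary search.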
6386 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
6387 NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0),
6388 NEONMAP0(splat_lane_v),
6389 NEONMAP0(splat_laneq_v),
6390 NEONMAP0(splatq_lane_v),
6391 NEONMAP0(splatq_laneq_v),
6392 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
6393 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
6394 NEONMAP0(vadd_v),
6395 NEONMAP0(vaddhn_v),
6396 NEONMAP0(vaddq_p128),
6397 NEONMAP0(vaddq_v),
6398 NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
6399 NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
6400 NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
6401 NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
6402 NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6403 NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6404 NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6405 NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6406 NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6407 NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6408 NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6409 NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6410 NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
6411 NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
6412 NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
6413 NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
6414 NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
6415 NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
6416 NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
6417 NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
6418 NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
6419 NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
6420 NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
6421 NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType),
6422 NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
6423 NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
6424 NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType),
6425 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
6426 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
6427 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
6428 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
6429 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
6430 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
6431 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
6432 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
6433 NEONMAP0(vceqz_v),
6434 NEONMAP0(vceqzq_v),
6435 NEONMAP0(vcgez_v),
6436 NEONMAP0(vcgezq_v),
6437 NEONMAP0(vcgtz_v),
6438 NEONMAP0(vcgtzq_v),
6439 NEONMAP0(vclez_v),
6440 NEONMAP0(vclezq_v),
6441 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
6442 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
6443 NEONMAP0(vcltz_v),
6444 NEONMAP0(vcltzq_v),
6445 NEONMAP1(vclz_v, ctlz, Add1ArgType),
6446 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
6447 NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
6448 NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
6449 NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
6450 NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
6451 NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
6452 NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
6453 NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
6454 NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
6455 NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
6456 NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
6457 NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType),
6458 NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
6459 NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
6460 NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType),
6461 NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
6462 NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
6463 NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType),
6464 NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
6465 NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
6466 NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType),
6467 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
6468 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
6469 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
6470 NEONMAP0(vcvt_f16_s16),
6471 NEONMAP0(vcvt_f16_u16),
6472 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
6473 NEONMAP0(vcvt_f32_v),
6474 NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
6475 NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
6476 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6477 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6478 NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
6479 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
6480 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
6481 NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
6482 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
6483 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
6484 NEONMAP0(vcvtq_f16_s16),
6485 NEONMAP0(vcvtq_f16_u16),
6486 NEONMAP0(vcvtq_f32_v),
6487 NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0),
6488 NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
6489 NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
6490 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6491 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6492 NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
6493 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
6494 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
6495 NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
6496 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
6497 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
6498 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
6499 NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
6500 NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
6501 NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
6502 NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
6503 NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6504 NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6505 NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6506 NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6507 NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6508 NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6509 NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6510 NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6511 NEONMAP0(vext_v),
6512 NEONMAP0(vextq_v),
6513 NEONMAP0(vfma_v),
6514 NEONMAP0(vfmaq_v),
6515 NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
6516 NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
6517 NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
6518 NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
6519 NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
6520 NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
6521 NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
6522 NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
6523 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6524 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6525 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6526 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6527 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
6528 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
6529 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
6530 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
6531 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
6532 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
6533 NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
6534 NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
6535 NEONMAP0(vmovl_v),
6536 NEONMAP0(vmovn_v),
6537 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
6538 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
6539 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
6540 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6541 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6542 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
6543 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
6544 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
6545 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6546 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6547 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
6548 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
6549 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
6550 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6551 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
6552 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
6553 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6554 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
6555 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
6556 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
6557 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
6558 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
6559 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
6560 NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType),
6561 NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6562 NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType),
6563 NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6564 NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
6565 NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6566 NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
6567 NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6568 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6569 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6570 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
6571 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6572 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6573 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
6574 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6575 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6576 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6577 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6578   NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6579 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6580 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
6581 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
6582 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6583 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6584 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
6585 NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
6586 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6587 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6588 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
6589 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
6590 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6591 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6592 NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
6593 NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType),
6594 NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
6595 NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType),
6596 NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
6597 NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType),
6598 NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
6599 NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType),
6600 NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
6601 NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType),
6602 NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
6603 NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType),
6604 NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
6605 NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType),
6606 NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
6607 NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType),
6608 NEONMAP0(vrndi_v),
6609 NEONMAP0(vrndiq_v),
6610 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6611 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6612 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6613 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6614 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6615 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6616 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
6617 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
6618 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
6619 NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
6620 NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
6621 NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
6622 NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
6623 NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
6624 NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
6625 NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
6626 NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
6627 NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
6628 NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
6629 NEONMAP0(vshl_n_v),
6630 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6631 NEONMAP0(vshll_n_v),
6632 NEONMAP0(vshlq_n_v),
6633 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6634 NEONMAP0(vshr_n_v),
6635 NEONMAP0(vshrn_n_v),
6636 NEONMAP0(vshrq_n_v),
6637 NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
6638 NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
6639 NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
6640 NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
6641 NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
6642 NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
6643 NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
6644 NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
6645 NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
6646 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
6647 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
6648 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
6649 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
6650 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
6651 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
6652 NEONMAP0(vsubhn_v),
6653 NEONMAP0(vtst_v),
6654 NEONMAP0(vtstq_v),
6655 NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
6656 NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
6657 NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
6658   NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
6659 };
6661 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
6662 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
6663 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
6664 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
6665 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6666 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6667 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6668 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6669 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6670 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6671 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6672 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6673 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
6674 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6675 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
6676 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6677 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6678 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6679 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6680 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6681 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6682 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6683 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6684 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6685 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6686 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6687 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6688 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6689 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6690 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6691 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6692 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6693 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6694 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6695 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6696 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
6697 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6698 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6699 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6700 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6701 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6702 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6703 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6704 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6705 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6706 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6707 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6708 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6709 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6710 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6711 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6712 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6713 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6714 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6715 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
6716 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6717 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6718 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6719 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6720 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6721 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6722 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6723 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6724 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6725 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6726 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6727 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6728 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6729 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6730 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6731 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6732 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6733 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6734 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6735 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6736 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
6737 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
6738 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
6739 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6740 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6741 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6742 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6743 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6744 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6745 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6746 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6747 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6748 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6749 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6750 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
6751 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6752 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
6753 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6754 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6755 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
6756 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
6757 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6758 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6759 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
6760 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
6761 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
6762 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
6763 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
6764 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
6765 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
6766 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
6767 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6768 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6769 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6770 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6771 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
6772 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6773 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6774 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6775 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
6776 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6777 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
6778 NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
6779 NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6780 NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
6781 NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6782 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
6783 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
6784 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6785 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6786 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
6787 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
6788 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6789 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6790 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
6791 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
6792 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
6793 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
6794 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6795 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6796 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6797 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6798 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
6799 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6800 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6801 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6802 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6803 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6804 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6805 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
6806 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
6807 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6808 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6809 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6810 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6811 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
6812 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
6813 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
6814 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
6815 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6816 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6817 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
6818 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
6819 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
6820 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6821 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6822 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6823 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6824 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
6825 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6826 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6827 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6828 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6829 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
6830 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
6831 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6832 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6833 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
6834 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
6835 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
6836 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
6837 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
6838 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
6839 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
6840 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
6841 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
6842 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
6843 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
6844 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
6845 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
6846 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
6847 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
6848 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
6849 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
6850 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
6851 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
6852 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
6853 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6854 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
6855 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6856 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
6857 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
6858 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
6859 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6860 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
6861 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6862 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
6863   // FP16 scalar intrinsics go here.
6864 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
6865 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6866 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6867 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6868 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6869 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6870 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6871 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6872 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6873 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6874 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6875 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6876 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6877 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6878 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6879 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6880 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6881 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6882 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6883 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6884 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6885 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6886 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6887 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6888 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6889 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6890 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6891 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6892 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6893 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
6894 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
6895 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
6896 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
6897 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
6900 // Some intrinsics are equivalent for codegen.
6901 static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
6902 { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, },
6903 { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, },
6904 { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, },
6905 { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, },
6906 { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, },
6907 { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
6908 { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
6909 { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
6910 { NEON::BI__builtin_neon_vbsl_f16, NEON::BI__builtin_neon_vbsl_v, },
6911 { NEON::BI__builtin_neon_vbslq_f16, NEON::BI__builtin_neon_vbslq_v, },
6912 { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
6913 { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
6914 { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
6915 { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, },
6916 { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, },
6917 { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, },
6918 { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, },
6919 { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, },
6920 { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, },
6921 { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, },
6922 { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, },
6923 { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, },
6924 { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, },
6925 { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, },
6926 { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, },
6927 { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
6928 { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
6929 { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
6930 { NEON::BI__builtin_neon_vext_f16, NEON::BI__builtin_neon_vext_v, },
6931 { NEON::BI__builtin_neon_vextq_f16, NEON::BI__builtin_neon_vextq_v, },
6932 { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
6933 { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
6934 { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
6935 { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, },
6936 { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, },
6937 { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, },
6938 { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v },
6939 { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v },
6940 { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v },
6941 { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v },
6942 { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v },
6943 { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v },
6944 { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v },
6945 { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v },
6946 { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v },
6947 { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v },
6948 { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v },
6949 { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v },
6950 { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v },
6951 { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v },
6952 { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v },
6953 { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v },
6954 { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v },
6955 { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v },
6956 { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v },
6957 { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v },
6958 { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v },
6959 { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v },
6960 { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v },
6961 { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v },
6962 { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v },
6963 { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v },
6964 { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v },
6965 { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v },
6966 { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v },
6967 { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v },
6968 { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, },
6969 { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, },
6970 { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, },
6971 { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, },
6972 { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, },
6973 { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, },
6974 { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, },
6975 { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, },
6976 { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, },
6977 { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, },
6978 { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, },
6979 { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, },
6980 { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, },
6981 { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, },
6982 { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, },
6983 { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, },
6984 { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, },
6985 { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, },
6986 { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, },
6987 { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, },
6988 { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, },
6989 { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, },
6990 { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, },
6991 { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, },
6992 { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, },
6993 { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, },
6994 { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, },
6995 { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, },
6996 { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, },
6997 { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, },
6998 { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, },
6999 { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, },
7000 { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, },
7001 { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, },
7002 { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, },
7003 { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, },
7004 { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, },
7005 { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, },
7006 { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, },
7007 { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, },
7008 { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, },
7009 { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, },
7010 { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, },
7011 { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, },
7012 { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v },
7013 { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v },
7014 { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v },
7015 { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v },
7016 { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v },
7017 { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v },
7018 { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v },
7019 { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v },
7020 { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v },
7021 { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v },
7022 { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v },
7023 { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v },
7024 { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v },
7025 { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v },
7026 { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v },
7027 { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v },
7028 { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v },
7029 { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v },
7030 { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v },
7031 { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
7032 { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
7033 { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
7034 { NEON::BI__builtin_neon_vtrn_f16, NEON::BI__builtin_neon_vtrn_v, },
7035 { NEON::BI__builtin_neon_vtrnq_f16, NEON::BI__builtin_neon_vtrnq_v, },
7036 { NEON::BI__builtin_neon_vuzp_f16, NEON::BI__builtin_neon_vuzp_v, },
7037 { NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
7038 { NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
7039 { NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
7040 // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
7041 // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
7042   // arbitrary one to be handled as the canonical variation.
7043 { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
7044 { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
7045 { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
7046 { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
7047 { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
7048 { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
7049 { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
7050 { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
7051 { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
7052 { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
7053 { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
7054 { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
7057 #undef NEONMAP0
7058 #undef NEONMAP1
7059 #undef NEONMAP2
7061 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
7063 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
7064 TypeModifier \
7067 #define SVEMAP2(NameBase, TypeModifier) \
7068 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
7069 static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
7070 #define GET_SVE_LLVM_INTRINSIC_MAP
7071 #include "clang/Basic/arm_sve_builtin_cg.inc"
7072 #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
7073 #undef GET_SVE_LLVM_INTRINSIC_MAP
7076 #undef SVEMAP1
7077 #undef SVEMAP2
7079 #define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
7081 #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
7082 TypeModifier \
7085 #define SMEMAP2(NameBase, TypeModifier) \
7086 { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier }
7087 static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
7088 #define GET_SME_LLVM_INTRINSIC_MAP
7089 #include "clang/Basic/arm_sme_builtin_cg.inc"
7090 #undef GET_SME_LLVM_INTRINSIC_MAP
7093 #undef SMEMAP1
7094 #undef SMEMAP2
7096 static bool NEONSIMDIntrinsicsProvenSorted = false;
7098 static bool AArch64SIMDIntrinsicsProvenSorted = false;
7099 static bool AArch64SISDIntrinsicsProvenSorted = false;
7100 static bool AArch64SVEIntrinsicsProvenSorted = false;
7101 static bool AArch64SMEIntrinsicsProvenSorted = false;
7103 static const ARMVectorIntrinsicInfo *
7104 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
7105 unsigned BuiltinID, bool &MapProvenSorted) {
7107 #ifndef NDEBUG
7108 if (!MapProvenSorted) {
7109 assert(llvm::is_sorted(IntrinsicMap));
7110 MapProvenSorted = true;
7112 #endif
7114 const ARMVectorIntrinsicInfo *Builtin =
7115 llvm::lower_bound(IntrinsicMap, BuiltinID);
7117 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
7118 return Builtin;
7120 return nullptr;
7123 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
7124 unsigned Modifier,
7125 llvm::Type *ArgType,
7126 const CallExpr *E) {
7127 int VectorSize = 0;
7128 if (Modifier & Use64BitVectors)
7129 VectorSize = 64;
7130 else if (Modifier & Use128BitVectors)
7131 VectorSize = 128;
7133 // Return type.
7134 SmallVector<llvm::Type *, 3> Tys;
7135 if (Modifier & AddRetType) {
7136 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
7137 if (Modifier & VectorizeRetType)
7138 Ty = llvm::FixedVectorType::get(
7139 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
7141 Tys.push_back(Ty);
7144 // Arguments.
7145 if (Modifier & VectorizeArgTypes) {
7146 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
7147 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
7150 if (Modifier & (Add1ArgType | Add2ArgTypes))
7151 Tys.push_back(ArgType);
7153 if (Modifier & Add2ArgTypes)
7154 Tys.push_back(ArgType);
7156 if (Modifier & InventFloatType)
7157 Tys.push_back(FloatTy);
7159 return CGM.getIntrinsic(IntrinsicID, Tys);
7162 static Value *EmitCommonNeonSISDBuiltinExpr(
7163 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
7164 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
7165 unsigned BuiltinID = SISDInfo.BuiltinID;
7166 unsigned int Int = SISDInfo.LLVMIntrinsic;
7167 unsigned Modifier = SISDInfo.TypeModifier;
7168 const char *s = SISDInfo.NameHint;
7170 switch (BuiltinID) {
7171 case NEON::BI__builtin_neon_vcled_s64:
7172 case NEON::BI__builtin_neon_vcled_u64:
7173 case NEON::BI__builtin_neon_vcles_f32:
7174 case NEON::BI__builtin_neon_vcled_f64:
7175 case NEON::BI__builtin_neon_vcltd_s64:
7176 case NEON::BI__builtin_neon_vcltd_u64:
7177 case NEON::BI__builtin_neon_vclts_f32:
7178 case NEON::BI__builtin_neon_vcltd_f64:
7179 case NEON::BI__builtin_neon_vcales_f32:
7180 case NEON::BI__builtin_neon_vcaled_f64:
7181 case NEON::BI__builtin_neon_vcalts_f32:
7182 case NEON::BI__builtin_neon_vcaltd_f64:
7183     // Only one direction of comparisons actually exists; cmle is actually a cmge
7184     // with swapped operands. The table gives us the right intrinsic, but we
7185 // still need to do the swap.
7186 std::swap(Ops[0], Ops[1]);
7187 break;
7190 assert(Int && "Generic code assumes a valid intrinsic");
7192 // Determine the type(s) of this overloaded AArch64 intrinsic.
7193 const Expr *Arg = E->getArg(0);
7194 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
7195 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
7197 int j = 0;
7198 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
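  // Promote scalar operands: any operand narrower than the intrinsic's vector
  // parameter is inserted into lane 0 of a poison vector of that type.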
7199 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
7200 ai != ae; ++ai, ++j) {
7201 llvm::Type *ArgTy = ai->getType();
7202 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
7203 ArgTy->getPrimitiveSizeInBits())
7204 continue;
7206 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
7207 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
7208 // it before inserting.
7209 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
7210 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
7211 Ops[j] =
7212 CGF.Builder.CreateInsertElement(PoisonValue::get(ArgTy), Ops[j], C0);
7215 Value *Result = CGF.EmitNeonCall(F, Ops, s);
7216 llvm::Type *ResultType = CGF.ConvertType(E->getType());
7217 if (ResultType->getPrimitiveSizeInBits().getFixedValue() <
7218 Result->getType()->getPrimitiveSizeInBits().getFixedValue())
7219 return CGF.Builder.CreateExtractElement(Result, C0);
7221 return CGF.Builder.CreateBitCast(Result, ResultType, s);
7224 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
7225 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
7226 const char *NameHint, unsigned Modifier, const CallExpr *E,
7227 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
7228 llvm::Triple::ArchType Arch) {
7229 // Get the last argument, which specifies the vector type.
7230 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7231 std::optional<llvm::APSInt> NeonTypeConst =
7232 Arg->getIntegerConstantExpr(getContext());
7233 if (!NeonTypeConst)
7234 return nullptr;
7236 // Determine the type of this overloaded NEON intrinsic.
7237 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
7238 bool Usgn = Type.isUnsigned();
7239 bool Quad = Type.isQuad();
7240 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
7241 const bool AllowBFloatArgsAndRet =
7242 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
7244 llvm::FixedVectorType *VTy =
7245 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
7246 llvm::Type *Ty = VTy;
7247 if (!Ty)
7248 return nullptr;
7250 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7251 return Builder.getInt32(addr.getAlignment().getQuantity());
7254 unsigned Int = LLVMIntrinsic;
7255 if ((Modifier & UnsignedAlts) && !Usgn)
7256 Int = AltLLVMIntrinsic;
7258 switch (BuiltinID) {
7259 default: break;
7260 case NEON::BI__builtin_neon_splat_lane_v:
7261 case NEON::BI__builtin_neon_splat_laneq_v:
7262 case NEON::BI__builtin_neon_splatq_lane_v:
7263 case NEON::BI__builtin_neon_splatq_laneq_v: {
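    // VTy describes the source vector; splatq_lane produces a result with
    // twice as many elements, and splat_laneq with half as many, so adjust
    // the element count we splat to.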
7264 auto NumElements = VTy->getElementCount();
7265 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
7266 NumElements = NumElements * 2;
7267 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
7268 NumElements = NumElements.divideCoefficientBy(2);
7270 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7271 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
7273 case NEON::BI__builtin_neon_vpadd_v:
7274 case NEON::BI__builtin_neon_vpaddq_v:
7275 // We don't allow fp/int overloading of intrinsics.
7276 if (VTy->getElementType()->isFloatingPointTy() &&
7277 Int == Intrinsic::aarch64_neon_addp)
7278 Int = Intrinsic::aarch64_neon_faddp;
7279 break;
7280 case NEON::BI__builtin_neon_vabs_v:
7281 case NEON::BI__builtin_neon_vabsq_v:
7282 if (VTy->getElementType()->isFloatingPointTy())
7283 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
7284 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
7285 case NEON::BI__builtin_neon_vadd_v:
7286 case NEON::BI__builtin_neon_vaddq_v: {
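    // Vector addition of polynomial types is carry-less, so emit it as a
    // bitwise XOR of the operands viewed as byte vectors.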
7287 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
7288 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7289 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
7290 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
7291 return Builder.CreateBitCast(Ops[0], Ty);
7293 case NEON::BI__builtin_neon_vaddhn_v: {
7294 llvm::FixedVectorType *SrcTy =
7295 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7297 // %sum = add <4 x i32> %lhs, %rhs
7298 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7299 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7300 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
7302 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
7303 Constant *ShiftAmt =
7304 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7305 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
7307 // %res = trunc <4 x i32> %high to <4 x i16>
7308 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
7310 case NEON::BI__builtin_neon_vcale_v:
7311 case NEON::BI__builtin_neon_vcaleq_v:
7312 case NEON::BI__builtin_neon_vcalt_v:
7313 case NEON::BI__builtin_neon_vcaltq_v:
7314 std::swap(Ops[0], Ops[1]);
7315 [[fallthrough]];
7316 case NEON::BI__builtin_neon_vcage_v:
7317 case NEON::BI__builtin_neon_vcageq_v:
7318 case NEON::BI__builtin_neon_vcagt_v:
7319 case NEON::BI__builtin_neon_vcagtq_v: {
7320 llvm::Type *Ty;
7321 switch (VTy->getScalarSizeInBits()) {
7322 default: llvm_unreachable("unexpected type");
7323 case 32:
7324 Ty = FloatTy;
7325 break;
7326 case 64:
7327 Ty = DoubleTy;
7328 break;
7329 case 16:
7330 Ty = HalfTy;
7331 break;
7333 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
7334 llvm::Type *Tys[] = { VTy, VecFlt };
7335 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7336 return EmitNeonCall(F, Ops, NameHint);
7338 case NEON::BI__builtin_neon_vceqz_v:
7339 case NEON::BI__builtin_neon_vceqzq_v:
7340 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
7341 ICmpInst::ICMP_EQ, "vceqz");
7342 case NEON::BI__builtin_neon_vcgez_v:
7343 case NEON::BI__builtin_neon_vcgezq_v:
7344 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
7345 ICmpInst::ICMP_SGE, "vcgez");
7346 case NEON::BI__builtin_neon_vclez_v:
7347 case NEON::BI__builtin_neon_vclezq_v:
7348 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
7349 ICmpInst::ICMP_SLE, "vclez");
7350 case NEON::BI__builtin_neon_vcgtz_v:
7351 case NEON::BI__builtin_neon_vcgtzq_v:
7352 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
7353 ICmpInst::ICMP_SGT, "vcgtz");
7354 case NEON::BI__builtin_neon_vcltz_v:
7355 case NEON::BI__builtin_neon_vcltzq_v:
7356 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
7357 ICmpInst::ICMP_SLT, "vcltz");
7358 case NEON::BI__builtin_neon_vclz_v:
7359 case NEON::BI__builtin_neon_vclzq_v:
7360     // We generate a target-independent intrinsic, which needs a second argument
7361     // saying whether or not clz of zero is undefined; on ARM it isn't.
7362 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
7363 break;
7364 case NEON::BI__builtin_neon_vcvt_f32_v:
7365 case NEON::BI__builtin_neon_vcvtq_f32_v:
7366 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7367 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
7368 HasLegalHalfType);
7369 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7370 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7371 case NEON::BI__builtin_neon_vcvt_f16_s16:
7372 case NEON::BI__builtin_neon_vcvt_f16_u16:
7373 case NEON::BI__builtin_neon_vcvtq_f16_s16:
7374 case NEON::BI__builtin_neon_vcvtq_f16_u16:
7375 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7376 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
7377 HasLegalHalfType);
7378 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7379 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7380 case NEON::BI__builtin_neon_vcvt_n_f16_s16:
7381 case NEON::BI__builtin_neon_vcvt_n_f16_u16:
7382 case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
7383 case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
7384 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
7385 Function *F = CGM.getIntrinsic(Int, Tys);
7386 return EmitNeonCall(F, Ops, "vcvt_n");
7388 case NEON::BI__builtin_neon_vcvt_n_f32_v:
7389 case NEON::BI__builtin_neon_vcvt_n_f64_v:
7390 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
7391 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
7392 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
7393 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7394 Function *F = CGM.getIntrinsic(Int, Tys);
7395 return EmitNeonCall(F, Ops, "vcvt_n");
7397 case NEON::BI__builtin_neon_vcvt_n_s16_f16:
7398 case NEON::BI__builtin_neon_vcvt_n_s32_v:
7399 case NEON::BI__builtin_neon_vcvt_n_u16_f16:
7400 case NEON::BI__builtin_neon_vcvt_n_u32_v:
7401 case NEON::BI__builtin_neon_vcvt_n_s64_v:
7402 case NEON::BI__builtin_neon_vcvt_n_u64_v:
7403 case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
7404 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
7405 case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
7406 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
7407 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
7408 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
7409 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
7410 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7411 return EmitNeonCall(F, Ops, "vcvt_n");
7413 case NEON::BI__builtin_neon_vcvt_s32_v:
7414 case NEON::BI__builtin_neon_vcvt_u32_v:
7415 case NEON::BI__builtin_neon_vcvt_s64_v:
7416 case NEON::BI__builtin_neon_vcvt_u64_v:
7417 case NEON::BI__builtin_neon_vcvt_s16_f16:
7418 case NEON::BI__builtin_neon_vcvt_u16_f16:
7419 case NEON::BI__builtin_neon_vcvtq_s32_v:
7420 case NEON::BI__builtin_neon_vcvtq_u32_v:
7421 case NEON::BI__builtin_neon_vcvtq_s64_v:
7422 case NEON::BI__builtin_neon_vcvtq_u64_v:
7423 case NEON::BI__builtin_neon_vcvtq_s16_f16:
7424 case NEON::BI__builtin_neon_vcvtq_u16_f16: {
7425 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
7426 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
7427 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
7429 case NEON::BI__builtin_neon_vcvta_s16_f16:
7430 case NEON::BI__builtin_neon_vcvta_s32_v:
7431 case NEON::BI__builtin_neon_vcvta_s64_v:
7432 case NEON::BI__builtin_neon_vcvta_u16_f16:
7433 case NEON::BI__builtin_neon_vcvta_u32_v:
7434 case NEON::BI__builtin_neon_vcvta_u64_v:
7435 case NEON::BI__builtin_neon_vcvtaq_s16_f16:
7436 case NEON::BI__builtin_neon_vcvtaq_s32_v:
7437 case NEON::BI__builtin_neon_vcvtaq_s64_v:
7438 case NEON::BI__builtin_neon_vcvtaq_u16_f16:
7439 case NEON::BI__builtin_neon_vcvtaq_u32_v:
7440 case NEON::BI__builtin_neon_vcvtaq_u64_v:
7441 case NEON::BI__builtin_neon_vcvtn_s16_f16:
7442 case NEON::BI__builtin_neon_vcvtn_s32_v:
7443 case NEON::BI__builtin_neon_vcvtn_s64_v:
7444 case NEON::BI__builtin_neon_vcvtn_u16_f16:
7445 case NEON::BI__builtin_neon_vcvtn_u32_v:
7446 case NEON::BI__builtin_neon_vcvtn_u64_v:
7447 case NEON::BI__builtin_neon_vcvtnq_s16_f16:
7448 case NEON::BI__builtin_neon_vcvtnq_s32_v:
7449 case NEON::BI__builtin_neon_vcvtnq_s64_v:
7450 case NEON::BI__builtin_neon_vcvtnq_u16_f16:
7451 case NEON::BI__builtin_neon_vcvtnq_u32_v:
7452 case NEON::BI__builtin_neon_vcvtnq_u64_v:
7453 case NEON::BI__builtin_neon_vcvtp_s16_f16:
7454 case NEON::BI__builtin_neon_vcvtp_s32_v:
7455 case NEON::BI__builtin_neon_vcvtp_s64_v:
7456 case NEON::BI__builtin_neon_vcvtp_u16_f16:
7457 case NEON::BI__builtin_neon_vcvtp_u32_v:
7458 case NEON::BI__builtin_neon_vcvtp_u64_v:
7459 case NEON::BI__builtin_neon_vcvtpq_s16_f16:
7460 case NEON::BI__builtin_neon_vcvtpq_s32_v:
7461 case NEON::BI__builtin_neon_vcvtpq_s64_v:
7462 case NEON::BI__builtin_neon_vcvtpq_u16_f16:
7463 case NEON::BI__builtin_neon_vcvtpq_u32_v:
7464 case NEON::BI__builtin_neon_vcvtpq_u64_v:
7465 case NEON::BI__builtin_neon_vcvtm_s16_f16:
7466 case NEON::BI__builtin_neon_vcvtm_s32_v:
7467 case NEON::BI__builtin_neon_vcvtm_s64_v:
7468 case NEON::BI__builtin_neon_vcvtm_u16_f16:
7469 case NEON::BI__builtin_neon_vcvtm_u32_v:
7470 case NEON::BI__builtin_neon_vcvtm_u64_v:
7471 case NEON::BI__builtin_neon_vcvtmq_s16_f16:
7472 case NEON::BI__builtin_neon_vcvtmq_s32_v:
7473 case NEON::BI__builtin_neon_vcvtmq_s64_v:
7474 case NEON::BI__builtin_neon_vcvtmq_u16_f16:
7475 case NEON::BI__builtin_neon_vcvtmq_u32_v:
7476 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
7477 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
7478 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7480 case NEON::BI__builtin_neon_vcvtx_f32_v: {
7481 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
7482 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7485 case NEON::BI__builtin_neon_vext_v:
7486 case NEON::BI__builtin_neon_vextq_v: {
7487 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
7488 SmallVector<int, 16> Indices;
7489 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7490 Indices.push_back(i+CV);
7492 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7493 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7494 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
7496 case NEON::BI__builtin_neon_vfma_v:
7497 case NEON::BI__builtin_neon_vfmaq_v: {
7498 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7499 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7500 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7502     // The NEON intrinsic puts the accumulator first, unlike LLVM's fma intrinsic.
7503 return emitCallMaybeConstrainedFPBuiltin(
7504 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
7505 {Ops[1], Ops[2], Ops[0]});
7507 case NEON::BI__builtin_neon_vld1_v:
7508 case NEON::BI__builtin_neon_vld1q_v: {
7509 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7510 Ops.push_back(getAlignmentValue32(PtrOp0));
7511 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
7513 case NEON::BI__builtin_neon_vld1_x2_v:
7514 case NEON::BI__builtin_neon_vld1q_x2_v:
7515 case NEON::BI__builtin_neon_vld1_x3_v:
7516 case NEON::BI__builtin_neon_vld1q_x3_v:
7517 case NEON::BI__builtin_neon_vld1_x4_v:
7518 case NEON::BI__builtin_neon_vld1q_x4_v: {
7519 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
7520 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7521 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
7522 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7524 case NEON::BI__builtin_neon_vld2_v:
7525 case NEON::BI__builtin_neon_vld2q_v:
7526 case NEON::BI__builtin_neon_vld3_v:
7527 case NEON::BI__builtin_neon_vld3q_v:
7528 case NEON::BI__builtin_neon_vld4_v:
7529 case NEON::BI__builtin_neon_vld4q_v:
7530 case NEON::BI__builtin_neon_vld2_dup_v:
7531 case NEON::BI__builtin_neon_vld2q_dup_v:
7532 case NEON::BI__builtin_neon_vld3_dup_v:
7533 case NEON::BI__builtin_neon_vld3q_dup_v:
7534 case NEON::BI__builtin_neon_vld4_dup_v:
7535 case NEON::BI__builtin_neon_vld4q_dup_v: {
7536 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7537 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7538 Value *Align = getAlignmentValue32(PtrOp1);
7539 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
7540 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7542 case NEON::BI__builtin_neon_vld1_dup_v:
7543 case NEON::BI__builtin_neon_vld1q_dup_v: {
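    // Load a single element and splat it across all lanes of the result.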
7544 Value *V = PoisonValue::get(Ty);
7545 PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
7546 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
7547 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7548 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
7549 return EmitNeonSplat(Ops[0], CI);
7551 case NEON::BI__builtin_neon_vld2_lane_v:
7552 case NEON::BI__builtin_neon_vld2q_lane_v:
7553 case NEON::BI__builtin_neon_vld3_lane_v:
7554 case NEON::BI__builtin_neon_vld3q_lane_v:
7555 case NEON::BI__builtin_neon_vld4_lane_v:
7556 case NEON::BI__builtin_neon_vld4q_lane_v: {
7557 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7558 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7559 for (unsigned I = 2; I < Ops.size() - 1; ++I)
7560 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
7561 Ops.push_back(getAlignmentValue32(PtrOp1));
7562 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
7563 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7565 case NEON::BI__builtin_neon_vmovl_v: {
7566 llvm::FixedVectorType *DTy =
7567 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
7568 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
7569 if (Usgn)
7570 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
7571 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
7573 case NEON::BI__builtin_neon_vmovn_v: {
7574 llvm::FixedVectorType *QTy =
7575 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7576 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
7577 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
7579 case NEON::BI__builtin_neon_vmull_v:
7580 // FIXME: the integer vmull operations could be emitted in terms of pure
7581 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
7582 // hoisting the exts outside loops. Until global ISel comes along that can
7583 // see through such movement this leads to bad CodeGen. So we need an
7584 // intrinsic for now.
7585 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
7586 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
7587 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
7588 case NEON::BI__builtin_neon_vpadal_v:
7589 case NEON::BI__builtin_neon_vpadalq_v: {
7590 // The source operand type has twice as many elements of half the size.
7591 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
7592 llvm::Type *EltTy =
7593 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
7594 auto *NarrowTy =
7595 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
7596 llvm::Type *Tys[2] = { Ty, NarrowTy };
7597 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7599 case NEON::BI__builtin_neon_vpaddl_v:
7600 case NEON::BI__builtin_neon_vpaddlq_v: {
7601 // The source operand type has twice as many elements of half the size.
7602 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
7603 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
7604 auto *NarrowTy =
7605 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
7606 llvm::Type *Tys[2] = { Ty, NarrowTy };
7607 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
7609 case NEON::BI__builtin_neon_vqdmlal_v:
7610 case NEON::BI__builtin_neon_vqdmlsl_v: {
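    // Emit the saturating doubling multiply of the last two operands first,
    // then combine it with the accumulator using the alternate (add/sub)
    // intrinsic.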
7611 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
7612 Ops[1] =
7613 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
7614 Ops.resize(2);
7615 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
7617 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
7618 case NEON::BI__builtin_neon_vqdmulh_lane_v:
7619 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
7620 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
7621 auto *RTy = cast<llvm::FixedVectorType>(Ty);
7622 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
7623 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
7624 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
7625 RTy->getNumElements() * 2);
7626 llvm::Type *Tys[2] = {
7627 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
7628 /*isQuad*/ false))};
7629 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7631 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
7632 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
7633 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
7634 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
7635 llvm::Type *Tys[2] = {
7636 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
7637 /*isQuad*/ true))};
7638 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7640 case NEON::BI__builtin_neon_vqshl_n_v:
7641 case NEON::BI__builtin_neon_vqshlq_n_v:
7642 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
7643 1, false);
7644 case NEON::BI__builtin_neon_vqshlu_n_v:
7645 case NEON::BI__builtin_neon_vqshluq_n_v:
7646 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
7647 1, false);
7648 case NEON::BI__builtin_neon_vrecpe_v:
7649 case NEON::BI__builtin_neon_vrecpeq_v:
7650 case NEON::BI__builtin_neon_vrsqrte_v:
7651 case NEON::BI__builtin_neon_vrsqrteq_v:
7652 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
7653 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
7654 case NEON::BI__builtin_neon_vrndi_v:
7655 case NEON::BI__builtin_neon_vrndiq_v:
7656 Int = Builder.getIsFPConstrained()
7657 ? Intrinsic::experimental_constrained_nearbyint
7658 : Intrinsic::nearbyint;
7659 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
7660 case NEON::BI__builtin_neon_vrshr_n_v:
7661 case NEON::BI__builtin_neon_vrshrq_n_v:
7662 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
7663 1, true);
7664 case NEON::BI__builtin_neon_vsha512hq_u64:
7665 case NEON::BI__builtin_neon_vsha512h2q_u64:
7666 case NEON::BI__builtin_neon_vsha512su0q_u64:
7667 case NEON::BI__builtin_neon_vsha512su1q_u64: {
7668 Function *F = CGM.getIntrinsic(Int);
7669 return EmitNeonCall(F, Ops, "");
7671 case NEON::BI__builtin_neon_vshl_n_v:
7672 case NEON::BI__builtin_neon_vshlq_n_v:
7673 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
7674 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
7675 "vshl_n");
7676 case NEON::BI__builtin_neon_vshll_n_v: {
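    // Widen the source elements (sign- or zero-extend as appropriate), then
    // perform the left shift in the wider type.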
7677 llvm::FixedVectorType *SrcTy =
7678 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
7679 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7680 if (Usgn)
7681 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
7682 else
7683 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
7684 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
7685 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
7687 case NEON::BI__builtin_neon_vshrn_n_v: {
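    // Shift the wide source right by the immediate, then truncate each
    // element to the narrow result type.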
7688 llvm::FixedVectorType *SrcTy =
7689 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7690 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7691 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
7692 if (Usgn)
7693 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
7694 else
7695 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
7696 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
7698 case NEON::BI__builtin_neon_vshr_n_v:
7699 case NEON::BI__builtin_neon_vshrq_n_v:
7700 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
7701 case NEON::BI__builtin_neon_vst1_v:
7702 case NEON::BI__builtin_neon_vst1q_v:
7703 case NEON::BI__builtin_neon_vst2_v:
7704 case NEON::BI__builtin_neon_vst2q_v:
7705 case NEON::BI__builtin_neon_vst3_v:
7706 case NEON::BI__builtin_neon_vst3q_v:
7707 case NEON::BI__builtin_neon_vst4_v:
7708 case NEON::BI__builtin_neon_vst4q_v:
7709 case NEON::BI__builtin_neon_vst2_lane_v:
7710 case NEON::BI__builtin_neon_vst2q_lane_v:
7711 case NEON::BI__builtin_neon_vst3_lane_v:
7712 case NEON::BI__builtin_neon_vst3q_lane_v:
7713 case NEON::BI__builtin_neon_vst4_lane_v:
7714 case NEON::BI__builtin_neon_vst4q_lane_v: {
7715 llvm::Type *Tys[] = {Int8PtrTy, Ty};
7716 Ops.push_back(getAlignmentValue32(PtrOp0));
7717 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
7719 case NEON::BI__builtin_neon_vsm3partw1q_u32:
7720 case NEON::BI__builtin_neon_vsm3partw2q_u32:
7721 case NEON::BI__builtin_neon_vsm3ss1q_u32:
7722 case NEON::BI__builtin_neon_vsm4ekeyq_u32:
7723 case NEON::BI__builtin_neon_vsm4eq_u32: {
7724 Function *F = CGM.getIntrinsic(Int);
7725 return EmitNeonCall(F, Ops, "");
7727 case NEON::BI__builtin_neon_vsm3tt1aq_u32:
7728 case NEON::BI__builtin_neon_vsm3tt1bq_u32:
7729 case NEON::BI__builtin_neon_vsm3tt2aq_u32:
7730 case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
7731 Function *F = CGM.getIntrinsic(Int);
7732 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
7733 return EmitNeonCall(F, Ops, "");
7735 case NEON::BI__builtin_neon_vst1_x2_v:
7736 case NEON::BI__builtin_neon_vst1q_x2_v:
7737 case NEON::BI__builtin_neon_vst1_x3_v:
7738 case NEON::BI__builtin_neon_vst1q_x3_v:
7739 case NEON::BI__builtin_neon_vst1_x4_v:
7740 case NEON::BI__builtin_neon_vst1q_x4_v: {
7741 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
7742     // in AArch64 it comes last. We may want to stick to one or the other.
7743 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
7744 Arch == llvm::Triple::aarch64_32) {
7745 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
7746 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
7747 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7749 llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
7750 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7752 case NEON::BI__builtin_neon_vsubhn_v: {
7753 llvm::FixedVectorType *SrcTy =
7754 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7756     // %diff = sub <4 x i32> %lhs, %rhs
7757 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7758 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7759 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
7761     // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
7762 Constant *ShiftAmt =
7763 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7764 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
7766 // %res = trunc <4 x i32> %high to <4 x i16>
7767 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
7769 case NEON::BI__builtin_neon_vtrn_v:
7770 case NEON::BI__builtin_neon_vtrnq_v: {
7771 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7772 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7773 Value *SV = nullptr;
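    // Result vi of the transpose interleaves the even (vi == 0) or odd
    // (vi == 1) numbered lanes of the two inputs and is stored to slot vi of
    // the returned pair, which Ops[0] points to.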
7775 for (unsigned vi = 0; vi != 2; ++vi) {
7776 SmallVector<int, 16> Indices;
7777 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7778 Indices.push_back(i+vi);
7779 Indices.push_back(i+e+vi);
7781 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7782 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
7783 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7785 return SV;
7787 case NEON::BI__builtin_neon_vtst_v:
7788 case NEON::BI__builtin_neon_vtstq_v: {
7789 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7790 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7791 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
7792 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
7793 ConstantAggregateZero::get(Ty));
7794 return Builder.CreateSExt(Ops[0], Ty, "vtst");
7796 case NEON::BI__builtin_neon_vuzp_v:
7797 case NEON::BI__builtin_neon_vuzpq_v: {
7798 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7799 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7800 Value *SV = nullptr;
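    // Result vi of the unzip gathers the even (vi == 0) or odd (vi == 1)
    // numbered lanes of the two inputs taken back to back.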
7802 for (unsigned vi = 0; vi != 2; ++vi) {
7803 SmallVector<int, 16> Indices;
7804 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7805 Indices.push_back(2*i+vi);
7807 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7808 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
7809 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7811 return SV;
7813 case NEON::BI__builtin_neon_vxarq_u64: {
7814 Function *F = CGM.getIntrinsic(Int);
7815 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
7816 return EmitNeonCall(F, Ops, "");
7818 case NEON::BI__builtin_neon_vzip_v:
7819 case NEON::BI__builtin_neon_vzipq_v: {
7820 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7821 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7822 Value *SV = nullptr;
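    // Result vi of the zip interleaves the low (vi == 0) or high (vi == 1)
    // halves of the two inputs.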
7824 for (unsigned vi = 0; vi != 2; ++vi) {
7825 SmallVector<int, 16> Indices;
7826 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7827 Indices.push_back((i + vi*e) >> 1);
7828 Indices.push_back(((i + vi*e) >> 1)+e);
7830 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7831 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
7832 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7834 return SV;
7836 case NEON::BI__builtin_neon_vdot_s32:
7837 case NEON::BI__builtin_neon_vdot_u32:
7838 case NEON::BI__builtin_neon_vdotq_s32:
7839 case NEON::BI__builtin_neon_vdotq_u32: {
7840 auto *InputTy =
7841 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7842 llvm::Type *Tys[2] = { Ty, InputTy };
7843 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
7845 case NEON::BI__builtin_neon_vfmlal_low_f16:
7846 case NEON::BI__builtin_neon_vfmlalq_low_f16: {
7847 auto *InputTy =
7848 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7849 llvm::Type *Tys[2] = { Ty, InputTy };
7850 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
7852 case NEON::BI__builtin_neon_vfmlsl_low_f16:
7853 case NEON::BI__builtin_neon_vfmlslq_low_f16: {
7854 auto *InputTy =
7855 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7856 llvm::Type *Tys[2] = { Ty, InputTy };
7857 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
7859 case NEON::BI__builtin_neon_vfmlal_high_f16:
7860 case NEON::BI__builtin_neon_vfmlalq_high_f16: {
7861 auto *InputTy =
7862 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7863 llvm::Type *Tys[2] = { Ty, InputTy };
7864 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
7866 case NEON::BI__builtin_neon_vfmlsl_high_f16:
7867 case NEON::BI__builtin_neon_vfmlslq_high_f16: {
7868 auto *InputTy =
7869 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7870 llvm::Type *Tys[2] = { Ty, InputTy };
7871 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
7873 case NEON::BI__builtin_neon_vmmlaq_s32:
7874 case NEON::BI__builtin_neon_vmmlaq_u32: {
7875 auto *InputTy =
7876 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7877 llvm::Type *Tys[2] = { Ty, InputTy };
7878 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
7880 case NEON::BI__builtin_neon_vusmmlaq_s32: {
7881 auto *InputTy =
7882 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7883 llvm::Type *Tys[2] = { Ty, InputTy };
7884 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
7886 case NEON::BI__builtin_neon_vusdot_s32:
7887 case NEON::BI__builtin_neon_vusdotq_s32: {
7888 auto *InputTy =
7889 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7890 llvm::Type *Tys[2] = { Ty, InputTy };
7891 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
7893 case NEON::BI__builtin_neon_vbfdot_f32:
7894 case NEON::BI__builtin_neon_vbfdotq_f32: {
7895 llvm::Type *InputTy =
7896 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
7897 llvm::Type *Tys[2] = { Ty, InputTy };
7898 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
7900 case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
7901 llvm::Type *Tys[1] = { Ty };
7902 Function *F = CGM.getIntrinsic(Int, Tys);
7903 return EmitNeonCall(F, Ops, "vcvtfp2bf");
7908 assert(Int && "Expected valid intrinsic number");
7910 // Determine the type(s) of this overloaded AArch64 intrinsic.
7911 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
7913 Value *Result = EmitNeonCall(F, Ops, NameHint);
7914 llvm::Type *ResultType = ConvertType(E->getType());
7915 // Cast the AArch64 intrinsic's one-element vector result to the
7916 // scalar type expected by the builtin.
7917 return Builder.CreateBitCast(Result, ResultType, NameHint);
7920 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
7921 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
7922 const CmpInst::Predicate Ip, const Twine &Name) {
7923 llvm::Type *OTy = Op->getType();
7925 // FIXME: this is utterly horrific. We should not be looking at previous
7926 // codegen context to find out what needs doing. Unfortunately TableGen
7927 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
7928 // (etc).
7929 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
7930 OTy = BI->getOperand(0)->getType();
7932 Op = Builder.CreateBitCast(Op, OTy);
7933 if (OTy->getScalarType()->isFloatingPointTy()) {
7934 if (Fp == CmpInst::FCMP_OEQ)
7935 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
7936 else
7937 Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy));
7938 } else {
7939 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
7941 return Builder.CreateSExt(Op, Ty, Name);
7944 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
7945 Value *ExtOp, Value *IndexOp,
7946 llvm::Type *ResTy, unsigned IntID,
7947 const char *Name) {
7948 SmallVector<Value *, 2> TblOps;
7949 if (ExtOp)
7950 TblOps.push_back(ExtOp);
7952 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15)
7953 SmallVector<int, 16> Indices;
7954 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
7955 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
7956 Indices.push_back(2*i);
7957 Indices.push_back(2*i+1);
7960 int PairPos = 0, End = Ops.size() - 1;
7961 while (PairPos < End) {
7962 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7963 Ops[PairPos+1], Indices,
7964 Name));
7965 PairPos += 2;
7968 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
7969 // of the final 128-bit lookup table with zeroes.
7970 if (PairPos == End) {
7971 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
7972 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7973 ZeroTbl, Indices, Name));
7976 Function *TblF;
7977 TblOps.push_back(IndexOp);
7978 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
7980 return CGF.EmitNeonCall(TblF, TblOps, Name);
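// Map an ARM hint builtin (__nop, __yield, __wfe, __wfi, __sev, __sevl) onto
// a single llvm.arm.hint call carrying the architectural immediate, so e.g.
// __builtin_arm_wfi is emitted roughly as:
//   call void @llvm.arm.hint(i32 3)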
7983 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
7984 unsigned Value;
7985 switch (BuiltinID) {
7986 default:
7987 return nullptr;
7988 case clang::ARM::BI__builtin_arm_nop:
7989 Value = 0;
7990 break;
7991 case clang::ARM::BI__builtin_arm_yield:
7992 case clang::ARM::BI__yield:
7993 Value = 1;
7994 break;
7995 case clang::ARM::BI__builtin_arm_wfe:
7996 case clang::ARM::BI__wfe:
7997 Value = 2;
7998 break;
7999 case clang::ARM::BI__builtin_arm_wfi:
8000 case clang::ARM::BI__wfi:
8001 Value = 3;
8002 break;
8003 case clang::ARM::BI__builtin_arm_sev:
8004 case clang::ARM::BI__sev:
8005 Value = 4;
8006 break;
8007 case clang::ARM::BI__builtin_arm_sevl:
8008 case clang::ARM::BI__sevl:
8009 Value = 5;
8010 break;
8013 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
8014 llvm::ConstantInt::get(Int32Ty, Value));
8017 enum SpecialRegisterAccessKind {
8018 NormalRead,
8019 VolatileRead,
8020 Write,
8023 // Generates the IR for __builtin_read_exec_*.
8024 // Lowers the builtin to the amdgcn_ballot intrinsic.
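// E.g. the *_hi variant (assuming a wave64 target, where the ballot result is
// an i64) is emitted roughly as:
//   %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 true)
//   %shr    = lshr i64 %ballot, 32
//   %hi     = trunc i64 %shr to i32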
8025 static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
8026 llvm::Type *RegisterType,
8027 llvm::Type *ValueType, bool isExecHi) {
8028 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8029 CodeGen::CodeGenModule &CGM = CGF.CGM;
8031 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {RegisterType});
8032 llvm::Value *Call = Builder.CreateCall(F, {Builder.getInt1(true)});
8034 if (isExecHi) {
8035 Value *Rt2 = Builder.CreateLShr(Call, 32);
8036 Rt2 = Builder.CreateTrunc(Rt2, CGF.Int32Ty);
8037 return Rt2;
8040 return Call;
8043 // Generates the IR for the read/write special register builtin.
8044 // ValueType is the type of the value that is to be written or read;
8045 // RegisterType is the type of the register being written to or read from.
8046 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
8047 const CallExpr *E,
8048 llvm::Type *RegisterType,
8049 llvm::Type *ValueType,
8050 SpecialRegisterAccessKind AccessKind,
8051 StringRef SysReg = "") {
8052 // The read_register and write_register intrinsics only support 32-, 64- and 128-bit operations.
8053 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) ||
8054 RegisterType->isIntegerTy(128)) &&
8055 "Unsupported size for register.");
8057 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8058 CodeGen::CodeGenModule &CGM = CGF.CGM;
8059 LLVMContext &Context = CGM.getLLVMContext();
8061 if (SysReg.empty()) {
8062 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
8063 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
8066 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
8067 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
8068 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
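// The register name travels to the intrinsic as metadata, so e.g. a 32-bit
// volatile read is emitted roughly as:
//   %v = call i32 @llvm.read_volatile_register.i32(metadata !0)  ; !0 = !{!"<sysreg>"}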
8070 llvm::Type *Types[] = { RegisterType };
8072 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
8073 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
8074 && "Can't fit 64-bit value in 32-bit register");
8076 if (AccessKind != Write) {
8077 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
8078 llvm::Function *F = CGM.getIntrinsic(
8079 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
8080 : llvm::Intrinsic::read_register,
8081 Types);
8082 llvm::Value *Call = Builder.CreateCall(F, Metadata);
8084 if (MixedTypes)
8085 // Read into 64 bit register and then truncate result to 32 bit.
8086 return Builder.CreateTrunc(Call, ValueType);
8088 if (ValueType->isPointerTy())
8089 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
8090 return Builder.CreateIntToPtr(Call, ValueType);
8092 return Call;
8095 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
8096 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
8097 if (MixedTypes) {
8098 // Extend 32 bit write value to 64 bit to pass to write.
8099 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
8100 return Builder.CreateCall(F, { Metadata, ArgValue });
8103 if (ValueType->isPointerTy()) {
8104 // Have VoidPtrTy ArgValue but want to return an i32/i64.
8105 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
8106 return Builder.CreateCall(F, { Metadata, ArgValue });
8109 return Builder.CreateCall(F, { Metadata, ArgValue });
8112 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
8113 /// argument that specifies the vector type.
8114 static bool HasExtraNeonArgument(unsigned BuiltinID) {
8115 switch (BuiltinID) {
8116 default: break;
8117 case NEON::BI__builtin_neon_vget_lane_i8:
8118 case NEON::BI__builtin_neon_vget_lane_i16:
8119 case NEON::BI__builtin_neon_vget_lane_bf16:
8120 case NEON::BI__builtin_neon_vget_lane_i32:
8121 case NEON::BI__builtin_neon_vget_lane_i64:
8122 case NEON::BI__builtin_neon_vget_lane_f32:
8123 case NEON::BI__builtin_neon_vgetq_lane_i8:
8124 case NEON::BI__builtin_neon_vgetq_lane_i16:
8125 case NEON::BI__builtin_neon_vgetq_lane_bf16:
8126 case NEON::BI__builtin_neon_vgetq_lane_i32:
8127 case NEON::BI__builtin_neon_vgetq_lane_i64:
8128 case NEON::BI__builtin_neon_vgetq_lane_f32:
8129 case NEON::BI__builtin_neon_vduph_lane_bf16:
8130 case NEON::BI__builtin_neon_vduph_laneq_bf16:
8131 case NEON::BI__builtin_neon_vset_lane_i8:
8132 case NEON::BI__builtin_neon_vset_lane_i16:
8133 case NEON::BI__builtin_neon_vset_lane_bf16:
8134 case NEON::BI__builtin_neon_vset_lane_i32:
8135 case NEON::BI__builtin_neon_vset_lane_i64:
8136 case NEON::BI__builtin_neon_vset_lane_f32:
8137 case NEON::BI__builtin_neon_vsetq_lane_i8:
8138 case NEON::BI__builtin_neon_vsetq_lane_i16:
8139 case NEON::BI__builtin_neon_vsetq_lane_bf16:
8140 case NEON::BI__builtin_neon_vsetq_lane_i32:
8141 case NEON::BI__builtin_neon_vsetq_lane_i64:
8142 case NEON::BI__builtin_neon_vsetq_lane_f32:
8143 case NEON::BI__builtin_neon_vsha1h_u32:
8144 case NEON::BI__builtin_neon_vsha1cq_u32:
8145 case NEON::BI__builtin_neon_vsha1pq_u32:
8146 case NEON::BI__builtin_neon_vsha1mq_u32:
8147 case NEON::BI__builtin_neon_vcvth_bf16_f32:
8148 case clang::ARM::BI_MoveToCoprocessor:
8149 case clang::ARM::BI_MoveToCoprocessor2:
8150 return false;
8152 return true;
8155 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
8156 const CallExpr *E,
8157 ReturnValueSlot ReturnValue,
8158 llvm::Triple::ArchType Arch) {
8159 if (auto Hint = GetValueForARMHint(BuiltinID))
8160 return Hint;
8162 if (BuiltinID == clang::ARM::BI__emit) {
8163 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
8164 llvm::FunctionType *FTy =
8165 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
8167 Expr::EvalResult Result;
8168 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
8169 llvm_unreachable("Sema will ensure that the parameter is constant");
8171 llvm::APSInt Value = Result.Val.getInt();
8172 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
8174 llvm::InlineAsm *Emit =
8175 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
8176 /*hasSideEffects=*/true)
8177 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
8178 /*hasSideEffects=*/true);
8180 return Builder.CreateCall(Emit);
8183 if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
8184 Value *Option = EmitScalarExpr(E->getArg(0));
8185 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
8188 if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
8189 Value *Address = EmitScalarExpr(E->getArg(0));
8190 Value *RW = EmitScalarExpr(E->getArg(1));
8191 Value *IsData = EmitScalarExpr(E->getArg(2));
8193 // Locality is not supported on the ARM target
8194 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
8196 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
8197 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
8200 if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
8201 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8202 return Builder.CreateCall(
8203 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
8206 if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
8207 BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
8208 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8209 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
8210 Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
8211 if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
8212 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
8213 return Res;
8217 if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
8218 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8219 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
8221 if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
8222 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8223 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
8224 "cls");
8227 if (BuiltinID == clang::ARM::BI__clear_cache) {
8228 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8229 const FunctionDecl *FD = E->getDirectCallee();
8230 Value *Ops[2];
8231 for (unsigned i = 0; i < 2; i++)
8232 Ops[i] = EmitScalarExpr(E->getArg(i));
8233 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8234 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8235 StringRef Name = FD->getName();
8236 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8239 if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
8240 BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
8241 Function *F;
8243 switch (BuiltinID) {
8244 default: llvm_unreachable("unexpected builtin");
8245 case clang::ARM::BI__builtin_arm_mcrr:
8246 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
8247 break;
8248 case clang::ARM::BI__builtin_arm_mcrr2:
8249 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
8250 break;
8253 // The MCRR{2} instruction has 5 operands, but
8254 // the intrinsic has 4 because Rt and Rt2
8255 // are represented as a single unsigned 64-bit
8256 // integer in the intrinsic definition;
8257 // internally they are represented as two
8258 // 32-bit integers.
8260 Value *Coproc = EmitScalarExpr(E->getArg(0));
8261 Value *Opc1 = EmitScalarExpr(E->getArg(1));
8262 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
8263 Value *CRm = EmitScalarExpr(E->getArg(3));
8265 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
8266 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
8267 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
8268 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
8270 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
8273 if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
8274 BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
8275 Function *F;
8277 switch (BuiltinID) {
8278 default: llvm_unreachable("unexpected builtin");
8279 case clang::ARM::BI__builtin_arm_mrrc:
8280 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
8281 break;
8282 case clang::ARM::BI__builtin_arm_mrrc2:
8283 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
8284 break;
8287 Value *Coproc = EmitScalarExpr(E->getArg(0));
8288 Value *Opc1 = EmitScalarExpr(E->getArg(1));
8289 Value *CRm = EmitScalarExpr(E->getArg(2));
8290 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
8292 // Returns an unsigned 64 bit integer, represented
8293 // as two 32 bit integers.
8295 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
8296 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
8297 Rt = Builder.CreateZExt(Rt, Int64Ty);
8298 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
8300 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
8301 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
8302 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
8304 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
8307 if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
8308 ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
8309 BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
8310 getContext().getTypeSize(E->getType()) == 64) ||
8311 BuiltinID == clang::ARM::BI__ldrexd) {
8312 Function *F;
8314 switch (BuiltinID) {
8315 default: llvm_unreachable("unexpected builtin");
8316 case clang::ARM::BI__builtin_arm_ldaex:
8317 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
8318 break;
8319 case clang::ARM::BI__builtin_arm_ldrexd:
8320 case clang::ARM::BI__builtin_arm_ldrex:
8321 case clang::ARM::BI__ldrexd:
8322 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
8323 break;
8326 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8327 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8328 "ldrexd");
8330 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8331 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8332 Val0 = Builder.CreateZExt(Val0, Int64Ty);
8333 Val1 = Builder.CreateZExt(Val1, Int64Ty);
8335 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
8336 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8337 Val = Builder.CreateOr(Val, Val1);
8338 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8341 if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
8342 BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
8343 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8345 QualType Ty = E->getType();
8346 llvm::Type *RealResTy = ConvertType(Ty);
8347 llvm::Type *IntTy =
8348 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
8350 Function *F = CGM.getIntrinsic(
8351 BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
8352 : Intrinsic::arm_ldrex,
8353 UnqualPtrTy);
8354 CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
8355 Val->addParamAttr(
8356 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
8358 if (RealResTy->isPointerTy())
8359 return Builder.CreateIntToPtr(Val, RealResTy);
8360 else {
8361 llvm::Type *IntResTy = llvm::IntegerType::get(
8362 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8363 return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
8364 RealResTy);
8368 if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
8369 ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
8370 BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
8371 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
8372 Function *F = CGM.getIntrinsic(
8373 BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
8374 : Intrinsic::arm_strexd);
8375 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
8377 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8378 Value *Val = EmitScalarExpr(E->getArg(0));
8379 Builder.CreateStore(Val, Tmp);
8381 Address LdPtr = Tmp.withElementType(STy);
8382 Val = Builder.CreateLoad(LdPtr);
8384 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
8385 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
8386 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
8387 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
8390 if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
8391 BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
8392 Value *StoreVal = EmitScalarExpr(E->getArg(0));
8393 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
8395 QualType Ty = E->getArg(0)->getType();
8396 llvm::Type *StoreTy =
8397 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
8399 if (StoreVal->getType()->isPointerTy())
8400 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
8401 else {
8402 llvm::Type *IntTy = llvm::IntegerType::get(
8403 getLLVMContext(),
8404 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
8405 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
8406 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
8409 Function *F = CGM.getIntrinsic(
8410 BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
8411 : Intrinsic::arm_strex,
8412 StoreAddr->getType());
8414 CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
8415 CI->addParamAttr(
8416 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
8417 return CI;
8420 if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
8421 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
8422 return Builder.CreateCall(F);
8425 // CRC32
8426 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
8427 switch (BuiltinID) {
8428 case clang::ARM::BI__builtin_arm_crc32b:
8429 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
8430 case clang::ARM::BI__builtin_arm_crc32cb:
8431 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
8432 case clang::ARM::BI__builtin_arm_crc32h:
8433 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
8434 case clang::ARM::BI__builtin_arm_crc32ch:
8435 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
8436 case clang::ARM::BI__builtin_arm_crc32w:
8437 case clang::ARM::BI__builtin_arm_crc32d:
8438 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
8439 case clang::ARM::BI__builtin_arm_crc32cw:
8440 case clang::ARM::BI__builtin_arm_crc32cd:
8441 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
8444 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
8445 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8446 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8448 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
8449 // intrinsics, hence we need different codegen for these cases.
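// E.g. __builtin_arm_crc32d(a, b) is emitted roughly as:
//   %lo  = trunc i64 %b to i32
//   %shr = lshr i64 %b, 32
//   %hi  = trunc i64 %shr to i32
//   %t   = call i32 @llvm.arm.crc32w(i32 %a, i32 %lo)
//   %res = call i32 @llvm.arm.crc32w(i32 %t, i32 %hi)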
8450 if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
8451 BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
8452 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
8453 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
8454 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
8455 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
8457 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8458 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
8459 return Builder.CreateCall(F, {Res, Arg1b});
8460 } else {
8461 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
8463 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8464 return Builder.CreateCall(F, {Arg0, Arg1});
8468 if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
8469 BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8470 BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
8471 BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
8472 BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
8473 BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {
8475 SpecialRegisterAccessKind AccessKind = Write;
8476 if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
8477 BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8478 BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
8479 AccessKind = VolatileRead;
8481 bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
8482 BuiltinID == clang::ARM::BI__builtin_arm_wsrp;
8484 bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8485 BuiltinID == clang::ARM::BI__builtin_arm_wsr64;
8487 llvm::Type *ValueType;
8488 llvm::Type *RegisterType;
8489 if (IsPointerBuiltin) {
8490 ValueType = VoidPtrTy;
8491 RegisterType = Int32Ty;
8492 } else if (Is64Bit) {
8493 ValueType = RegisterType = Int64Ty;
8494 } else {
8495 ValueType = RegisterType = Int32Ty;
8498 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
8499 AccessKind);
8502 if (BuiltinID == ARM::BI__builtin_sponentry) {
8503 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
8504 return Builder.CreateCall(F);
8507 // Handle MSVC intrinsics before argument evaluation to prevent double
8508 // evaluation.
8509 if (std::optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
8510 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
8512 // Deal with MVE builtins
8513 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
8514 return Result;
8515 // Handle CDE builtins
8516 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
8517 return Result;
8519 // Some intrinsics are equivalent - if they are use the base intrinsic ID.
8520 auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
8521 return P.first == BuiltinID;
8523 if (It != end(NEONEquivalentIntrinsicMap))
8524 BuiltinID = It->second;
8526 // Find out if any arguments are required to be integer constant
8527 // expressions.
8528 unsigned ICEArguments = 0;
8529 ASTContext::GetBuiltinTypeError Error;
8530 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8531 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8533 auto getAlignmentValue32 = [&](Address addr) -> Value* {
8534 return Builder.getInt32(addr.getAlignment().getQuantity());
8537 Address PtrOp0 = Address::invalid();
8538 Address PtrOp1 = Address::invalid();
8539 SmallVector<Value*, 4> Ops;
8540 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
8541 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
8542 for (unsigned i = 0, e = NumArgs; i != e; i++) {
8543 if (i == 0) {
8544 switch (BuiltinID) {
8545 case NEON::BI__builtin_neon_vld1_v:
8546 case NEON::BI__builtin_neon_vld1q_v:
8547 case NEON::BI__builtin_neon_vld1q_lane_v:
8548 case NEON::BI__builtin_neon_vld1_lane_v:
8549 case NEON::BI__builtin_neon_vld1_dup_v:
8550 case NEON::BI__builtin_neon_vld1q_dup_v:
8551 case NEON::BI__builtin_neon_vst1_v:
8552 case NEON::BI__builtin_neon_vst1q_v:
8553 case NEON::BI__builtin_neon_vst1q_lane_v:
8554 case NEON::BI__builtin_neon_vst1_lane_v:
8555 case NEON::BI__builtin_neon_vst2_v:
8556 case NEON::BI__builtin_neon_vst2q_v:
8557 case NEON::BI__builtin_neon_vst2_lane_v:
8558 case NEON::BI__builtin_neon_vst2q_lane_v:
8559 case NEON::BI__builtin_neon_vst3_v:
8560 case NEON::BI__builtin_neon_vst3q_v:
8561 case NEON::BI__builtin_neon_vst3_lane_v:
8562 case NEON::BI__builtin_neon_vst3q_lane_v:
8563 case NEON::BI__builtin_neon_vst4_v:
8564 case NEON::BI__builtin_neon_vst4q_v:
8565 case NEON::BI__builtin_neon_vst4_lane_v:
8566 case NEON::BI__builtin_neon_vst4q_lane_v:
8567 // Get the alignment for the argument in addition to the value;
8568 // we'll use it later.
8569 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
8570 Ops.push_back(PtrOp0.getPointer());
8571 continue;
8574 if (i == 1) {
8575 switch (BuiltinID) {
8576 case NEON::BI__builtin_neon_vld2_v:
8577 case NEON::BI__builtin_neon_vld2q_v:
8578 case NEON::BI__builtin_neon_vld3_v:
8579 case NEON::BI__builtin_neon_vld3q_v:
8580 case NEON::BI__builtin_neon_vld4_v:
8581 case NEON::BI__builtin_neon_vld4q_v:
8582 case NEON::BI__builtin_neon_vld2_lane_v:
8583 case NEON::BI__builtin_neon_vld2q_lane_v:
8584 case NEON::BI__builtin_neon_vld3_lane_v:
8585 case NEON::BI__builtin_neon_vld3q_lane_v:
8586 case NEON::BI__builtin_neon_vld4_lane_v:
8587 case NEON::BI__builtin_neon_vld4q_lane_v:
8588 case NEON::BI__builtin_neon_vld2_dup_v:
8589 case NEON::BI__builtin_neon_vld2q_dup_v:
8590 case NEON::BI__builtin_neon_vld3_dup_v:
8591 case NEON::BI__builtin_neon_vld3q_dup_v:
8592 case NEON::BI__builtin_neon_vld4_dup_v:
8593 case NEON::BI__builtin_neon_vld4q_dup_v:
8594 // Get the alignment for the argument in addition to the value;
8595 // we'll use it later.
8596 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
8597 Ops.push_back(PtrOp1.getPointer());
8598 continue;
8602 if ((ICEArguments & (1 << i)) == 0) {
8603 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8604 } else {
8605 // If this is required to be a constant, constant fold it so that we know
8606 // that the generated intrinsic gets a ConstantInt.
8607 Ops.push_back(llvm::ConstantInt::get(
8608 getLLVMContext(),
8609 *E->getArg(i)->getIntegerConstantExpr(getContext())));
8613 switch (BuiltinID) {
8614 default: break;
8616 case NEON::BI__builtin_neon_vget_lane_i8:
8617 case NEON::BI__builtin_neon_vget_lane_i16:
8618 case NEON::BI__builtin_neon_vget_lane_i32:
8619 case NEON::BI__builtin_neon_vget_lane_i64:
8620 case NEON::BI__builtin_neon_vget_lane_bf16:
8621 case NEON::BI__builtin_neon_vget_lane_f32:
8622 case NEON::BI__builtin_neon_vgetq_lane_i8:
8623 case NEON::BI__builtin_neon_vgetq_lane_i16:
8624 case NEON::BI__builtin_neon_vgetq_lane_i32:
8625 case NEON::BI__builtin_neon_vgetq_lane_i64:
8626 case NEON::BI__builtin_neon_vgetq_lane_bf16:
8627 case NEON::BI__builtin_neon_vgetq_lane_f32:
8628 case NEON::BI__builtin_neon_vduph_lane_bf16:
8629 case NEON::BI__builtin_neon_vduph_laneq_bf16:
8630 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
8632 case NEON::BI__builtin_neon_vrndns_f32: {
8633 Value *Arg = EmitScalarExpr(E->getArg(0));
8634 llvm::Type *Tys[] = {Arg->getType()};
8635 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
8636 return Builder.CreateCall(F, {Arg}, "vrndn"); }
8638 case NEON::BI__builtin_neon_vset_lane_i8:
8639 case NEON::BI__builtin_neon_vset_lane_i16:
8640 case NEON::BI__builtin_neon_vset_lane_i32:
8641 case NEON::BI__builtin_neon_vset_lane_i64:
8642 case NEON::BI__builtin_neon_vset_lane_bf16:
8643 case NEON::BI__builtin_neon_vset_lane_f32:
8644 case NEON::BI__builtin_neon_vsetq_lane_i8:
8645 case NEON::BI__builtin_neon_vsetq_lane_i16:
8646 case NEON::BI__builtin_neon_vsetq_lane_i32:
8647 case NEON::BI__builtin_neon_vsetq_lane_i64:
8648 case NEON::BI__builtin_neon_vsetq_lane_bf16:
8649 case NEON::BI__builtin_neon_vsetq_lane_f32:
8650 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8652 case NEON::BI__builtin_neon_vsha1h_u32:
8653 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
8654 "vsha1h");
8655 case NEON::BI__builtin_neon_vsha1cq_u32:
8656 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
8657 "vsha1h");
8658 case NEON::BI__builtin_neon_vsha1pq_u32:
8659 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
8660 "vsha1h");
8661 case NEON::BI__builtin_neon_vsha1mq_u32:
8662 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
8663 "vsha1h");
8665 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
8666 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
8667 "vcvtbfp2bf");
8670 // The ARM _MoveToCoprocessor builtins put the input register value as
8671 // the first argument, but the LLVM intrinsic expects it as the third one.
8672 case clang::ARM::BI_MoveToCoprocessor:
8673 case clang::ARM::BI_MoveToCoprocessor2: {
8674 Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
8675 ? Intrinsic::arm_mcr
8676 : Intrinsic::arm_mcr2);
8677 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
8678 Ops[3], Ops[4], Ops[5]});
8682 // Get the last argument, which specifies the vector type.
8683 assert(HasExtraArg);
8684 const Expr *Arg = E->getArg(E->getNumArgs()-1);
8685 std::optional<llvm::APSInt> Result =
8686 Arg->getIntegerConstantExpr(getContext());
8687 if (!Result)
8688 return nullptr;
8690 if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
8691 BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
8692 // Determine the overloaded type of this builtin.
8693 llvm::Type *Ty;
8694 if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
8695 Ty = FloatTy;
8696 else
8697 Ty = DoubleTy;
8699 // Determine whether this is an unsigned conversion or not.
8700 bool usgn = Result->getZExtValue() == 1;
8701 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
8703 // Call the appropriate intrinsic.
8704 Function *F = CGM.getIntrinsic(Int, Ty);
8705 return Builder.CreateCall(F, Ops, "vcvtr");
8708 // Determine the type of this overloaded NEON intrinsic.
8709 NeonTypeFlags Type = Result->getZExtValue();
8710 bool usgn = Type.isUnsigned();
8711 bool rightShift = false;
8713 llvm::FixedVectorType *VTy =
8714 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
8715 getTarget().hasBFloat16Type());
8716 llvm::Type *Ty = VTy;
8717 if (!Ty)
8718 return nullptr;
8720 // Many NEON builtins have identical semantics and uses in ARM and
8721 // AArch64. Emit these in a single function.
8722 auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
8723 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
8724 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
8725 if (Builtin)
8726 return EmitCommonNeonBuiltinExpr(
8727 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
8728 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
8730 unsigned Int;
8731 switch (BuiltinID) {
8732 default: return nullptr;
8733 case NEON::BI__builtin_neon_vld1q_lane_v:
8734 // Handle 64-bit integer elements as a special case. Use shuffles of
8735 // one-element vectors to avoid poor code for i64 in the backend.
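// E.g. for a <2 x i64> vector the untouched lane is extracted with a
// one-element shuffle, the new lane is loaded as a <1 x i64> via
// llvm.arm.neon.vld1, and the two are recombined with a final shufflevector.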
8736 if (VTy->getElementType()->isIntegerTy(64)) {
8737 // Extract the other lane.
8738 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8739 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
8740 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
8741 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8742 // Load the value as a one-element vector.
8743 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
8744 llvm::Type *Tys[] = {Ty, Int8PtrTy};
8745 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
8746 Value *Align = getAlignmentValue32(PtrOp0);
8747 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
8748 // Combine them.
8749 int Indices[] = {1 - Lane, Lane};
8750 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
8752 [[fallthrough]];
8753 case NEON::BI__builtin_neon_vld1_lane_v: {
8754 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8755 PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
8756 Value *Ld = Builder.CreateLoad(PtrOp0);
8757 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
8759 case NEON::BI__builtin_neon_vqrshrn_n_v:
8760 Int =
8761 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
8762 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
8763 1, true);
8764 case NEON::BI__builtin_neon_vqrshrun_n_v:
8765 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
8766 Ops, "vqrshrun_n", 1, true);
8767 case NEON::BI__builtin_neon_vqshrn_n_v:
8768 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
8769 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
8770 1, true);
8771 case NEON::BI__builtin_neon_vqshrun_n_v:
8772 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
8773 Ops, "vqshrun_n", 1, true);
8774 case NEON::BI__builtin_neon_vrecpe_v:
8775 case NEON::BI__builtin_neon_vrecpeq_v:
8776 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
8777 Ops, "vrecpe");
8778 case NEON::BI__builtin_neon_vrshrn_n_v:
8779 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
8780 Ops, "vrshrn_n", 1, true);
8781 case NEON::BI__builtin_neon_vrsra_n_v:
8782 case NEON::BI__builtin_neon_vrsraq_n_v:
8783 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8784 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8785 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
8786 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
8787 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
8788 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
8789 case NEON::BI__builtin_neon_vsri_n_v:
8790 case NEON::BI__builtin_neon_vsriq_n_v:
8791 rightShift = true;
8792 [[fallthrough]];
8793 case NEON::BI__builtin_neon_vsli_n_v:
8794 case NEON::BI__builtin_neon_vsliq_n_v:
8795 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
8796 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
8797 Ops, "vsli_n");
8798 case NEON::BI__builtin_neon_vsra_n_v:
8799 case NEON::BI__builtin_neon_vsraq_n_v:
8800 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8801 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
8802 return Builder.CreateAdd(Ops[0], Ops[1]);
8803 case NEON::BI__builtin_neon_vst1q_lane_v:
8804 // Handle 64-bit integer elements as a special case. Use a shuffle to get
8805 // a one-element vector and avoid poor code for i64 in the backend.
8806 if (VTy->getElementType()->isIntegerTy(64)) {
8807 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8808 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
8809 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8810 Ops[2] = getAlignmentValue32(PtrOp0);
8811 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
8812 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
8813 Tys), Ops);
8815 [[fallthrough]];
8816 case NEON::BI__builtin_neon_vst1_lane_v: {
8817 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8818 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
8819 return Builder.CreateStore(Ops[1],
8820 PtrOp0.withElementType(Ops[1]->getType()));
8822 case NEON::BI__builtin_neon_vtbl1_v:
8823 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
8824 Ops, "vtbl1");
8825 case NEON::BI__builtin_neon_vtbl2_v:
8826 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
8827 Ops, "vtbl2");
8828 case NEON::BI__builtin_neon_vtbl3_v:
8829 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
8830 Ops, "vtbl3");
8831 case NEON::BI__builtin_neon_vtbl4_v:
8832 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
8833 Ops, "vtbl4");
8834 case NEON::BI__builtin_neon_vtbx1_v:
8835 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
8836 Ops, "vtbx1");
8837 case NEON::BI__builtin_neon_vtbx2_v:
8838 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
8839 Ops, "vtbx2");
8840 case NEON::BI__builtin_neon_vtbx3_v:
8841 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
8842 Ops, "vtbx3");
8843 case NEON::BI__builtin_neon_vtbx4_v:
8844 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
8845 Ops, "vtbx4");
8849 template<typename Integer>
8850 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
8851 return E->getIntegerConstantExpr(Context)->getExtValue();
8854 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
8855 llvm::Type *T, bool Unsigned) {
8856 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
8857 // which finds it convenient to specify signed/unsigned as a boolean flag.
8858 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
8861 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
8862 uint32_t Shift, bool Unsigned) {
8863 // MVE helper function for integer shift right. This must handle signed vs
8864 // unsigned, and also deal specially with the case where the shift count is
8865 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
8866 // undefined behavior, but in MVE it's legal, so we must convert it to code
8867 // that is not undefined in IR.
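// E.g. an unsigned shift of a <8 x i16> vector by 16 folds to the zero
// vector, while a signed shift by 16 is emitted as an ashr by 15.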
8868 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
8869 ->getElementType()
8870 ->getPrimitiveSizeInBits();
8871 if (Shift == LaneBits) {
8872 // An unsigned shift of the full lane size always generates zero, so we can
8873 // simply emit a zero vector. A signed shift of the full lane size does the
8874 // same thing as shifting by one bit fewer.
8875 if (Unsigned)
8876 return llvm::Constant::getNullValue(V->getType());
8877 else
8878 --Shift;
8880 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
8883 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
8884 // MVE-specific helper function for a vector splat, which infers the element
8885 // count of the output vector by knowing that MVE vectors are all 128 bits
8886 // wide.
8887 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
8888 return Builder.CreateVectorSplat(Elements, V);
8891 static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
8892 CodeGenFunction *CGF,
8893 llvm::Value *V,
8894 llvm::Type *DestType) {
8895 // Convert one MVE vector type into another by reinterpreting its in-register
8896 // format.
8898 // Little-endian, this is identical to a bitcast (which reinterprets the
8899 // memory format). But big-endian, they're not necessarily the same, because
8900 // the register and memory formats map to each other differently depending on
8901 // the lane size.
8903 // We generate a bitcast whenever we can (if we're little-endian, or if the
8904 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
8905 // that performs the different kind of reinterpretation.
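// E.g. reinterpreting <8 x i16> as <4 x i32> on a big-endian target is
// emitted roughly as:
//   call <4 x i32> @llvm.arm.mve.vreinterpretq.v4i32.v8i16(<8 x i16> %v)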
8906 if (CGF->getTarget().isBigEndian() &&
8907 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
8908 return Builder.CreateCall(
8909 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
8910 {DestType, V->getType()}),
8911 V);
8912 } else {
8913 return Builder.CreateBitCast(V, DestType);
8917 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
8918 // Make a shufflevector that extracts every other element of a vector (evens
8919 // or odds, as desired).
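// E.g. extracting the even elements of a <8 x i16> uses the shuffle mask
// <0, 2, 4, 6>.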
8920 SmallVector<int, 16> Indices;
8921 unsigned InputElements =
8922 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
8923 for (unsigned i = 0; i < InputElements; i += 2)
8924 Indices.push_back(i + Odd);
8925 return Builder.CreateShuffleVector(V, Indices);
8928 static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
8929 llvm::Value *V1) {
8930 // Make a shufflevector that interleaves two vectors element by element.
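// E.g. zipping two <4 x i32> vectors uses the shuffle mask
// <0, 4, 1, 5, 2, 6, 3, 7>.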
8931 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8932 SmallVector<int, 16> Indices;
8933 unsigned InputElements =
8934 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
8935 for (unsigned i = 0; i < InputElements; i++) {
8936 Indices.push_back(i);
8937 Indices.push_back(i + InputElements);
8939 return Builder.CreateShuffleVector(V0, V1, Indices);
8942 template<unsigned HighBit, unsigned OtherBits>
8943 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
8944 // MVE-specific helper function to make a vector splat of a constant such as
8945 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
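// E.g. with 32-bit lanes, ARMMVEConstantSplat<1, 0> splats 0x80000000
// (INT32_MIN) and ARMMVEConstantSplat<0, 1> splats 0x7fffffff (INT32_MAX).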
8946 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
8947 unsigned LaneBits = T->getPrimitiveSizeInBits();
8948 uint32_t Value = HighBit << (LaneBits - 1);
8949 if (OtherBits)
8950 Value |= (1UL << (LaneBits - 1)) - 1;
8951 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
8952 return ARMMVEVectorSplat(Builder, Lane);
8955 static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
8956 llvm::Value *V,
8957 unsigned ReverseWidth) {
8958 // MVE-specific helper function which reverses the elements of a
8959 // vector within every (ReverseWidth)-bit collection of lanes.
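// E.g. ReverseWidth == 32 on a <8 x i16> vector uses the shuffle mask
// <1, 0, 3, 2, 5, 4, 7, 6>.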
8960 SmallVector<int, 16> Indices;
8961 unsigned LaneSize = V->getType()->getScalarSizeInBits();
8962 unsigned Elements = 128 / LaneSize;
8963 unsigned Mask = ReverseWidth / LaneSize - 1;
8964 for (unsigned i = 0; i < Elements; i++)
8965 Indices.push_back(i ^ Mask);
8966 return Builder.CreateShuffleVector(V, Indices);
8969 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
8970 const CallExpr *E,
8971 ReturnValueSlot ReturnValue,
8972 llvm::Triple::ArchType Arch) {
8973 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
8974 Intrinsic::ID IRIntr;
8975 unsigned NumVectors;
8977 // Code autogenerated by Tablegen will handle all the simple builtins.
8978 switch (BuiltinID) {
8979 #include "clang/Basic/arm_mve_builtin_cg.inc"
8981 // If we didn't match an MVE builtin id at all, go back to the
8982 // main EmitARMBuiltinExpr.
8983 default:
8984 return nullptr;
8987 // Anything that breaks from that switch is an MVE builtin that
8988 // needs handwritten code to generate.
8990 switch (CustomCodeGenType) {
8992 case CustomCodeGen::VLD24: {
8993 llvm::SmallVector<Value *, 4> Ops;
8994 llvm::SmallVector<llvm::Type *, 4> Tys;
8996 auto MvecCType = E->getType();
8997 auto MvecLType = ConvertType(MvecCType);
8998 assert(MvecLType->isStructTy() &&
8999 "Return type for vld[24]q should be a struct");
9000 assert(MvecLType->getStructNumElements() == 1 &&
9001 "Return-type struct for vld[24]q should have one element");
9002 auto MvecLTypeInner = MvecLType->getStructElementType(0);
9003 assert(MvecLTypeInner->isArrayTy() &&
9004 "Return-type struct for vld[24]q should contain an array");
9005 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
9006 "Array member of return-type struct vld[24]q has wrong length");
9007 auto VecLType = MvecLTypeInner->getArrayElementType();
9009 Tys.push_back(VecLType);
9011 auto Addr = E->getArg(0);
9012 Ops.push_back(EmitScalarExpr(Addr));
9013 Tys.push_back(ConvertType(Addr->getType()));
9015 Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
9016 Value *LoadResult = Builder.CreateCall(F, Ops);
9017 Value *MvecOut = PoisonValue::get(MvecLType);
9018 for (unsigned i = 0; i < NumVectors; ++i) {
9019 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
9020 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
9023 if (ReturnValue.isNull())
9024 return MvecOut;
9025 else
9026 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
9029 case CustomCodeGen::VST24: {
9030 llvm::SmallVector<Value *, 4> Ops;
9031 llvm::SmallVector<llvm::Type *, 4> Tys;
9033 auto Addr = E->getArg(0);
9034 Ops.push_back(EmitScalarExpr(Addr));
9035 Tys.push_back(ConvertType(Addr->getType()));
9037 auto MvecCType = E->getArg(1)->getType();
9038 auto MvecLType = ConvertType(MvecCType);
9039 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
9040 assert(MvecLType->getStructNumElements() == 1 &&
9041 "Data-type struct for vst2q should have one element");
9042 auto MvecLTypeInner = MvecLType->getStructElementType(0);
9043 assert(MvecLTypeInner->isArrayTy() &&
9044 "Data-type struct for vst2q should contain an array");
9045 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
9046 "Array member of return-type struct vld[24]q has wrong length");
9047 auto VecLType = MvecLTypeInner->getArrayElementType();
9049 Tys.push_back(VecLType);
9051 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
9052 EmitAggExpr(E->getArg(1), MvecSlot);
9053 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
9054 for (unsigned i = 0; i < NumVectors; i++)
9055 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
9057 Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
9058 Value *ToReturn = nullptr;
9059 for (unsigned i = 0; i < NumVectors; i++) {
9060 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
9061 ToReturn = Builder.CreateCall(F, Ops);
9062 Ops.pop_back();
9064 return ToReturn;
9067 llvm_unreachable("unknown custom codegen type.");
9070 Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
9071 const CallExpr *E,
9072 ReturnValueSlot ReturnValue,
9073 llvm::Triple::ArchType Arch) {
9074 switch (BuiltinID) {
9075 default:
9076 return nullptr;
9077 #include "clang/Basic/arm_cde_builtin_cg.inc"
9081 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
9082 const CallExpr *E,
9083 SmallVectorImpl<Value *> &Ops,
9084 llvm::Triple::ArchType Arch) {
9085 unsigned int Int = 0;
9086 const char *s = nullptr;
9088 switch (BuiltinID) {
9089 default:
9090 return nullptr;
9091 case NEON::BI__builtin_neon_vtbl1_v:
9092 case NEON::BI__builtin_neon_vqtbl1_v:
9093 case NEON::BI__builtin_neon_vqtbl1q_v:
9094 case NEON::BI__builtin_neon_vtbl2_v:
9095 case NEON::BI__builtin_neon_vqtbl2_v:
9096 case NEON::BI__builtin_neon_vqtbl2q_v:
9097 case NEON::BI__builtin_neon_vtbl3_v:
9098 case NEON::BI__builtin_neon_vqtbl3_v:
9099 case NEON::BI__builtin_neon_vqtbl3q_v:
9100 case NEON::BI__builtin_neon_vtbl4_v:
9101 case NEON::BI__builtin_neon_vqtbl4_v:
9102 case NEON::BI__builtin_neon_vqtbl4q_v:
9103 break;
9104 case NEON::BI__builtin_neon_vtbx1_v:
9105 case NEON::BI__builtin_neon_vqtbx1_v:
9106 case NEON::BI__builtin_neon_vqtbx1q_v:
9107 case NEON::BI__builtin_neon_vtbx2_v:
9108 case NEON::BI__builtin_neon_vqtbx2_v:
9109 case NEON::BI__builtin_neon_vqtbx2q_v:
9110 case NEON::BI__builtin_neon_vtbx3_v:
9111 case NEON::BI__builtin_neon_vqtbx3_v:
9112 case NEON::BI__builtin_neon_vqtbx3q_v:
9113 case NEON::BI__builtin_neon_vtbx4_v:
9114 case NEON::BI__builtin_neon_vqtbx4_v:
9115 case NEON::BI__builtin_neon_vqtbx4q_v:
9116 break;
9119 assert(E->getNumArgs() >= 3);
9121 // Get the last argument, which specifies the vector type.
9122 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
9123 std::optional<llvm::APSInt> Result =
9124 Arg->getIntegerConstantExpr(CGF.getContext());
9125 if (!Result)
9126 return nullptr;
9128 // Determine the type of this overloaded NEON intrinsic.
9129 NeonTypeFlags Type = Result->getZExtValue();
9130 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
9131 if (!Ty)
9132 return nullptr;
9134 CodeGen::CGBuilderTy &Builder = CGF.Builder;
9136 // AArch64 scalar builtins are not overloaded: they do not have an extra
9137 // argument that specifies the vector type, so we need to handle each case.
9138 switch (BuiltinID) {
9139 case NEON::BI__builtin_neon_vtbl1_v: {
9140 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 1), nullptr, Ops[1],
9141 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
9143 case NEON::BI__builtin_neon_vtbl2_v: {
9144 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 2), nullptr, Ops[2],
9145 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
9147 case NEON::BI__builtin_neon_vtbl3_v: {
9148 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 3), nullptr, Ops[3],
9149 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
9151 case NEON::BI__builtin_neon_vtbl4_v: {
9152 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 4), nullptr, Ops[4],
9153 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
9155 case NEON::BI__builtin_neon_vtbx1_v: {
9156 Value *TblRes =
9157 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 1), nullptr, Ops[2], Ty,
9158 Intrinsic::aarch64_neon_tbl1, "vtbl1");
9160 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
9161 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
9162 CmpRes = Builder.CreateSExt(CmpRes, Ty);
9164 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
9165 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
9166 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
9168 case NEON::BI__builtin_neon_vtbx2_v: {
9169 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 2), Ops[0], Ops[3],
9170 Ty, Intrinsic::aarch64_neon_tbx1, "vtbx1");
9172 case NEON::BI__builtin_neon_vtbx3_v: {
9173 Value *TblRes =
9174 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 3), nullptr, Ops[4], Ty,
9175 Intrinsic::aarch64_neon_tbl2, "vtbl2");
9177 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
9178 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
9179 TwentyFourV);
9180 CmpRes = Builder.CreateSExt(CmpRes, Ty);
9182 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
9183 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
9184 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
9186 case NEON::BI__builtin_neon_vtbx4_v: {
9187 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 4), Ops[0], Ops[5],
9188 Ty, Intrinsic::aarch64_neon_tbx2, "vtbx2");
9190 case NEON::BI__builtin_neon_vqtbl1_v:
9191 case NEON::BI__builtin_neon_vqtbl1q_v:
9192 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
9193 case NEON::BI__builtin_neon_vqtbl2_v:
9194 case NEON::BI__builtin_neon_vqtbl2q_v: {
9195 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
9196 case NEON::BI__builtin_neon_vqtbl3_v:
9197 case NEON::BI__builtin_neon_vqtbl3q_v:
9198 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
9199 case NEON::BI__builtin_neon_vqtbl4_v:
9200 case NEON::BI__builtin_neon_vqtbl4q_v:
9201 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
9202 case NEON::BI__builtin_neon_vqtbx1_v:
9203 case NEON::BI__builtin_neon_vqtbx1q_v:
9204 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
9205 case NEON::BI__builtin_neon_vqtbx2_v:
9206 case NEON::BI__builtin_neon_vqtbx2q_v:
9207 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
9208 case NEON::BI__builtin_neon_vqtbx3_v:
9209 case NEON::BI__builtin_neon_vqtbx3q_v:
9210 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
9211 case NEON::BI__builtin_neon_vqtbx4_v:
9212 case NEON::BI__builtin_neon_vqtbx4q_v:
9213 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
9217 if (!Int)
9218 return nullptr;
9220 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
9221 return CGF.EmitNeonCall(F, Ops, s);
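// Widen a 16-bit scalar into lane 0 of a <4 x i16> vector, leaving the other
// lanes poison; emitted roughly as:
//   %v = insertelement <4 x i16> poison, i16 %op, i64 0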
9224 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
9225 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
9226 Op = Builder.CreateBitCast(Op, Int16Ty);
9227 Value *V = PoisonValue::get(VTy);
9228 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
9229 Op = Builder.CreateInsertElement(V, Op, CI);
9230 return Op;
9233 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
9234 /// access builtin. Only required if it can't be inferred from the base pointer
9235 /// operand.
9236 llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
9237 switch (TypeFlags.getMemEltType()) {
9238 case SVETypeFlags::MemEltTyDefault:
9239 return getEltType(TypeFlags);
9240 case SVETypeFlags::MemEltTyInt8:
9241 return Builder.getInt8Ty();
9242 case SVETypeFlags::MemEltTyInt16:
9243 return Builder.getInt16Ty();
9244 case SVETypeFlags::MemEltTyInt32:
9245 return Builder.getInt32Ty();
9246 case SVETypeFlags::MemEltTyInt64:
9247 return Builder.getInt64Ty();
9249 llvm_unreachable("Unknown MemEltType");
9252 llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
9253 switch (TypeFlags.getEltType()) {
9254 default:
9255 llvm_unreachable("Invalid SVETypeFlag!");
9257 case SVETypeFlags::EltTyInt8:
9258 return Builder.getInt8Ty();
9259 case SVETypeFlags::EltTyInt16:
9260 return Builder.getInt16Ty();
9261 case SVETypeFlags::EltTyInt32:
9262 return Builder.getInt32Ty();
9263 case SVETypeFlags::EltTyInt64:
9264 return Builder.getInt64Ty();
9265 case SVETypeFlags::EltTyInt128:
9266 return Builder.getInt128Ty();
9268 case SVETypeFlags::EltTyFloat16:
9269 return Builder.getHalfTy();
9270 case SVETypeFlags::EltTyFloat32:
9271 return Builder.getFloatTy();
9272 case SVETypeFlags::EltTyFloat64:
9273 return Builder.getDoubleTy();
9275 case SVETypeFlags::EltTyBFloat16:
9276 return Builder.getBFloatTy();
9278 case SVETypeFlags::EltTyBool8:
9279 case SVETypeFlags::EltTyBool16:
9280 case SVETypeFlags::EltTyBool32:
9281 case SVETypeFlags::EltTyBool64:
9282 return Builder.getInt1Ty();
9286 // Return the llvm predicate vector type corresponding to the specified element
9287 // TypeFlags.
9288 llvm::ScalableVectorType *
9289 CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
9290 switch (TypeFlags.getEltType()) {
9291 default: llvm_unreachable("Unhandled SVETypeFlag!");
9293 case SVETypeFlags::EltTyInt8:
9294 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9295 case SVETypeFlags::EltTyInt16:
9296 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9297 case SVETypeFlags::EltTyInt32:
9298 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9299 case SVETypeFlags::EltTyInt64:
9300 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9302 case SVETypeFlags::EltTyBFloat16:
9303 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9304 case SVETypeFlags::EltTyFloat16:
9305 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9306 case SVETypeFlags::EltTyFloat32:
9307 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9308 case SVETypeFlags::EltTyFloat64:
9309 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9311 case SVETypeFlags::EltTyBool8:
9312 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9313 case SVETypeFlags::EltTyBool16:
9314 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9315 case SVETypeFlags::EltTyBool32:
9316 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9317 case SVETypeFlags::EltTyBool64:
9318 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9322 // Return the llvm vector type corresponding to the specified element TypeFlags.
9323 llvm::ScalableVectorType *
9324 CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
9325 switch (TypeFlags.getEltType()) {
9326 default:
9327 llvm_unreachable("Invalid SVETypeFlag!");
9329 case SVETypeFlags::EltTyInt8:
9330 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
9331 case SVETypeFlags::EltTyInt16:
9332 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
9333 case SVETypeFlags::EltTyInt32:
9334 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
9335 case SVETypeFlags::EltTyInt64:
9336 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
9338 case SVETypeFlags::EltTyFloat16:
9339 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
9340 case SVETypeFlags::EltTyBFloat16:
9341 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
9342 case SVETypeFlags::EltTyFloat32:
9343 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
9344 case SVETypeFlags::EltTyFloat64:
9345 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
9347 case SVETypeFlags::EltTyBool8:
9348 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9349 case SVETypeFlags::EltTyBool16:
9350 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9351 case SVETypeFlags::EltTyBool32:
9352 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9353 case SVETypeFlags::EltTyBool64:
9354 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9358 llvm::Value *
9359 CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
9360 Function *Ptrue =
9361 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
9362 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
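// Illustrative sketch of the expected lowering (assuming a 32-bit element
// type): the call above produces, roughly,
//   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
// i.e. an all-true predicate sized for the element width, with the SV_ALL
// pattern (31).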
9365 constexpr unsigned SVEBitsPerBlock = 128;
9367 static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
9368 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
9369 return llvm::ScalableVectorType::get(EltTy, NumElts);
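// For example: an i8 element gives 128/8 = 16 lanes, i.e. <vscale x 16 x i8>,
// while a double gives 128/64 = 2 lanes, i.e. <vscale x 2 x double>.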
9372 // Reinterpret the input predicate so that it can be used to correctly isolate
9373 // the elements of the specified datatype.
9374 Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
9375 llvm::ScalableVectorType *VTy) {
9377 if (isa<TargetExtType>(Pred->getType()) &&
9378 cast<TargetExtType>(Pred->getType())->getName() == "aarch64.svcount")
9379 return Pred;
9381 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
9382 if (Pred->getType() == RTy)
9383 return Pred;
9385 unsigned IntID;
9386 llvm::Type *IntrinsicTy;
9387 switch (VTy->getMinNumElements()) {
9388 default:
9389 llvm_unreachable("unsupported element count!");
9390 case 1:
9391 case 2:
9392 case 4:
9393 case 8:
9394 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
9395 IntrinsicTy = RTy;
9396 break;
9397 case 16:
9398 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
9399 IntrinsicTy = Pred->getType();
9400 break;
9403 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
9404 Value *C = Builder.CreateCall(F, Pred);
9405 assert(C->getType() == RTy && "Unexpected return type!");
9406 return C;
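// Illustrative sketch: narrowing an svbool_t predicate for use with 64-bit
// data emits, roughly,
//   %pg.d = call <vscale x 2 x i1>
//       @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
// whereas widening a narrower predicate back to the 16-element svbool_t form
// goes through convert.to.svbool instead.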
9409 Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
9410 SmallVectorImpl<Value *> &Ops,
9411 unsigned IntID) {
9412 auto *ResultTy = getSVEType(TypeFlags);
9413 auto *OverloadedTy =
9414 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
9416 // At the ACLE level there's only one predicate type, svbool_t, which is
9417 // mapped to <n x 16 x i1>. However, this might be incompatible with the
9418 // actual type being loaded. For example, when loading doubles (i64) the
9419 // predicate should be <n x 2 x i1> instead. At the IR level the type of
9420 // the predicate and the data being loaded must match. Cast accordingly.
9421 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
9423 Function *F = nullptr;
9424 if (Ops[1]->getType()->isVectorTy())
9425 // This is the "vector base, scalar offset" case. In order to uniquely
9426 // map this built-in to an LLVM IR intrinsic, we need both the return type
9427 // and the type of the vector base.
9428 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
9429 else
9430 // This is the "scalar base, vector offset" case. The type of the offset
9431 // is encoded in the name of the intrinsic. We only need to specify the
9432 // return type in order to uniquely map this built-in to an LLVM IR
9433 // intrinsic.
9434 F = CGM.getIntrinsic(IntID, OverloadedTy);
9436 // Pass 0 when the offset is missing. This can only be applied when using
9437 // the "vector base" addressing mode for which ACLE allows no offset. The
9438 // corresponding LLVM IR always requires an offset.
9439 if (Ops.size() == 2) {
9440 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9441 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9444 // For "vector base, scalar index" scale the index so that it becomes a
9445 // scalar offset.
9446 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
9447 unsigned BytesPerElt =
9448 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
9449 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9452 Value *Call = Builder.CreateCall(F, Ops);
9454 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
9455 // other cases it's folded into a nop.
9456 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
9457 : Builder.CreateSExt(Call, ResultTy);
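// Illustrative example: for an extending byte gather into 64-bit elements,
// ResultTy is <vscale x 2 x i64> and OverloadedTy is <vscale x 2 x i8>; the
// gather intrinsic loads the narrow vector and the zext/sext above widens it
// to the ACLE return type.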
9460 Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
9461 SmallVectorImpl<Value *> &Ops,
9462 unsigned IntID) {
9463 auto *SrcDataTy = getSVEType(TypeFlags);
9464 auto *OverloadedTy =
9465 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
9467 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
9468 // it's the first argument. Move it accordingly.
9469 Ops.insert(Ops.begin(), Ops.pop_back_val());
9471 Function *F = nullptr;
9472 if (Ops[2]->getType()->isVectorTy())
9473 // This is the "vector base, scalar offset" case. In order to uniquely
9474 // map this built-in to an LLVM IR intrinsic, we need both the return type
9475 // and the type of the vector base.
9476 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
9477 else
9478 // This is the "scalar base, vector offset" case. The type of the offset
9479 // is encoded in the name of the intrinsic. We only need to specify the
9480 // return type in order to uniquely map this built-in to an LLVM IR
9481 // intrinsic.
9482 F = CGM.getIntrinsic(IntID, OverloadedTy);
9484 // Pass 0 when the offset is missing. This can only be applied when using
9485 // the "vector base" addressing mode for which ACLE allows no offset. The
9486 // corresponding LLVM IR always requires an offset.
9487 if (Ops.size() == 3) {
9488 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9489 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9492 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
9493 // folded into a nop.
9494 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
9496 // At the ACLE level there's only one predicate type, svbool_t, which is
9497 // mapped to <n x 16 x i1>. However, this might be incompatible with the
9498 // actual type being stored. For example, when storing doubles (i64) the
9499 // predicate should be <n x 2 x i1> instead. At the IR level the type of
9500 // the predicate and the data being stored must match. Cast accordingly.
9501 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
9503 // For "vector base, scalar index" scale the index so that it becomes a
9504 // scalar offset.
9505 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
9506 unsigned BytesPerElt =
9507 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
9508 Ops[3] = Builder.CreateShl(Ops[3], Log2_32(BytesPerElt));
9511 return Builder.CreateCall(F, Ops);
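// Illustrative example: a truncating byte scatter from 64-bit elements first
// truncates the <vscale x 2 x i64> data to <vscale x 2 x i8> (OverloadedTy),
// mirroring the widening performed on the gather-load path.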
9514 Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
9515 SmallVectorImpl<Value *> &Ops,
9516 unsigned IntID) {
9517 // The gather prefetches are overloaded on the vector input - this can either
9518 // be the vector of base addresses or vector of offsets.
9519 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
9520 if (!OverloadedTy)
9521 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
9523 // Cast the predicate from svbool_t to the right number of elements.
9524 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
9526 // vector + imm addressing modes
9527 if (Ops[1]->getType()->isVectorTy()) {
9528 if (Ops.size() == 3) {
9529 // Pass 0 for 'vector+imm' when the index is omitted.
9530 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9532 // The sv_prfop is the last operand in the builtin and IR intrinsic.
9533 std::swap(Ops[2], Ops[3]);
9534 } else {
9535 // Index needs to be passed as scaled offset.
9536 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
9537 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
9538 if (BytesPerElt > 1)
9539 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9543 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
9544 return Builder.CreateCall(F, Ops);
9547 Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
9548 SmallVectorImpl<Value*> &Ops,
9549 unsigned IntID) {
9550 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
9552 unsigned N;
9553 switch (IntID) {
9554 case Intrinsic::aarch64_sve_ld2_sret:
9555 case Intrinsic::aarch64_sve_ld1_pn_x2:
9556 case Intrinsic::aarch64_sve_ldnt1_pn_x2:
9557 N = 2;
9558 break;
9559 case Intrinsic::aarch64_sve_ld3_sret:
9560 N = 3;
9561 break;
9562 case Intrinsic::aarch64_sve_ld4_sret:
9563 case Intrinsic::aarch64_sve_ld1_pn_x4:
9564 case Intrinsic::aarch64_sve_ldnt1_pn_x4:
9565 N = 4;
9566 break;
9567 default:
9568 llvm_unreachable("unknown intrinsic!");
9570 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
9571 VTy->getElementCount() * N);
9573 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
9574 Value *BasePtr = Ops[1];
9576 // Does the load have an offset?
9577 if (Ops.size() > 2)
9578 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
9580 Function *F = CGM.getIntrinsic(IntID, {VTy});
9581 Value *Call = Builder.CreateCall(F, {Predicate, BasePtr});
9582 unsigned MinElts = VTy->getMinNumElements();
9583 Value *Ret = llvm::PoisonValue::get(RetTy);
9584 for (unsigned I = 0; I < N; I++) {
9585 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9586 Value *SRet = Builder.CreateExtractValue(Call, I);
9587 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
9589 return Ret;
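// Illustrative example: an ld2 of 32-bit elements returns the struct
// { <vscale x 4 x i32>, <vscale x 4 x i32> }; the loop above packs the parts
// into a single <vscale x 8 x i32> at element offsets 0 and 4.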
9592 Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
9593 SmallVectorImpl<Value*> &Ops,
9594 unsigned IntID) {
9595 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
9597 unsigned N;
9598 switch (IntID) {
9599 case Intrinsic::aarch64_sve_st2:
9600 case Intrinsic::aarch64_sve_st1_pn_x2:
9601 case Intrinsic::aarch64_sve_stnt1_pn_x2:
9602 N = 2;
9603 break;
9604 case Intrinsic::aarch64_sve_st3:
9605 N = 3;
9606 break;
9607 case Intrinsic::aarch64_sve_st4:
9608 case Intrinsic::aarch64_sve_st1_pn_x4:
9609 case Intrinsic::aarch64_sve_stnt1_pn_x4:
9610 N = 4;
9611 break;
9612 default:
9613 llvm_unreachable("unknown intrinsic!");
9616 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
9617 Value *BasePtr = Ops[1];
9619 // Does the store have an offset?
9620 if (Ops.size() > (2 + N))
9621 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
9623 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
9624 // need to break up the tuple vector.
9625 SmallVector<llvm::Value*, 5> Operands;
9626 for (unsigned I = Ops.size() - N; I < Ops.size(); ++I)
9627 Operands.push_back(Ops[I]);
9628 Operands.append({Predicate, BasePtr});
9629 Function *F = CGM.getIntrinsic(IntID, { VTy });
9631 return Builder.CreateCall(F, Operands);
9634 // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
9635 // svpmullt_pair intrinsics, with the exception that their results are bitcast
9636 // to a wider type.
9637 Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
9638 SmallVectorImpl<Value *> &Ops,
9639 unsigned BuiltinID) {
9640 // Splat scalar operand to vector (intrinsics with _n infix)
9641 if (TypeFlags.hasSplatOperand()) {
9642 unsigned OpNo = TypeFlags.getSplatOperand();
9643 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
9646 // The pair-wise function has a narrower overloaded type.
9647 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
9648 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
9650 // Now bitcast to the wider result type.
9651 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
9652 return EmitSVEReinterpret(Call, Ty);
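// Illustrative example: svpmullb_u16 calls the pmullb_pair intrinsic on
// <vscale x 16 x i8> operands and bitcasts its <vscale x 16 x i8> result to
// the wider <vscale x 8 x i16> ACLE return type.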
9655 Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
9656 ArrayRef<Value *> Ops, unsigned BuiltinID) {
9657 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
9658 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
9659 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
9662 Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
9663 SmallVectorImpl<Value *> &Ops,
9664 unsigned BuiltinID) {
9665 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
9666 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
9667 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9669 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9670 Value *BasePtr = Ops[1];
9672 // Apply the index operand to the base pointer if it was not omitted.
9673 if (Ops.size() > 3)
9674 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9676 Value *PrfOp = Ops.back();
9678 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
9679 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
9682 Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
9683 llvm::Type *ReturnTy,
9684 SmallVectorImpl<Value *> &Ops,
9685 unsigned BuiltinID,
9686 bool IsZExtReturn) {
9687 QualType LangPTy = E->getArg(1)->getType();
9688 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
9689 LangPTy->castAs<PointerType>()->getPointeeType());
9691 // The vector type that is returned may be different from the
9692 // eventual type loaded from memory.
9693 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
9694 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9696 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9697 Value *BasePtr = Ops[1];
9699 // Does the load have an offset?
9700 if (Ops.size() > 2)
9701 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9703 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
9704 auto *Load =
9705 cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
9706 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
9707 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
9709 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
9710 : Builder.CreateSExt(Load, VectorTy);
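// Illustrative example: svld1sb_s32 loads <vscale x 4 x i8> (MemoryTy) and
// sign-extends it to the <vscale x 4 x i32> return type; for a plain
// svld1_s32 the two types match and the extension folds away.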
9713 Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
9714 SmallVectorImpl<Value *> &Ops,
9715 unsigned BuiltinID) {
9716 QualType LangPTy = E->getArg(1)->getType();
9717 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
9718 LangPTy->castAs<PointerType>()->getPointeeType());
9720 // The vector type that is stored may be different from the
9721 // eventual type stored to memory.
9722 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
9723 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9725 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9726 Value *BasePtr = Ops[1];
9728 // Does the store have an offset?
9729 if (Ops.size() == 4)
9730 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9732 // Last value is always the data
9733 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
9735 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
9736 auto *Store =
9737 cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
9738 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
9739 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
9740 return Store;
9743 Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
9744 SmallVectorImpl<Value *> &Ops,
9745 unsigned IntID) {
9746 Ops[2] = EmitSVEPredicateCast(
9747 Ops[2], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));
9749 SmallVector<Value *> NewOps;
9750 NewOps.push_back(Ops[2]);
9752 llvm::Value *BasePtr = Ops[3];
9754 // If the intrinsic contains the vnum parameter, multiply it with the vector
9755 // size in bytes.
9756 if (Ops.size() == 5) {
9757 Function *StreamingVectorLength =
9758 CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
9759 llvm::Value *StreamingVectorLengthCall =
9760 Builder.CreateCall(StreamingVectorLength);
9761 llvm::Value *Mulvl =
9762 Builder.CreateMul(StreamingVectorLengthCall, Ops[4], "mulvl");
9763 // The type of the ptr parameter is void *, so use Int8Ty here.
9764 BasePtr = Builder.CreateGEP(Int8Ty, Ops[3], Mulvl);
9766 NewOps.push_back(BasePtr);
9767 NewOps.push_back(Ops[0]);
9768 NewOps.push_back(Ops[1]);
9769 Function *F = CGM.getIntrinsic(IntID);
9770 return Builder.CreateCall(F, NewOps);
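// Illustrative example: for the _vnum forms the base pointer used above is,
// roughly, base + llvm.aarch64.sme.cntsb() * vnum, i.e. vnum scaled by the
// streaming vector length in bytes via an i8 GEP.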
9773 Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
9774 SmallVectorImpl<Value *> &Ops,
9775 unsigned IntID) {
9776 auto *VecTy = getSVEType(TypeFlags);
9777 Function *F = CGM.getIntrinsic(IntID, VecTy);
9778 if (TypeFlags.isReadZA())
9779 Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
9780 else if (TypeFlags.isWriteZA())
9781 Ops[2] = EmitSVEPredicateCast(Ops[2], VecTy);
9782 return Builder.CreateCall(F, Ops);
9785 Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
9786 SmallVectorImpl<Value *> &Ops,
9787 unsigned IntID) {
9788 // The svzero_za() intrinsic zeros the entire ZA array and has no parameters.
9789 if (Ops.size() == 0)
9790 Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
9791 Function *F = CGM.getIntrinsic(IntID, {});
9792 return Builder.CreateCall(F, Ops);
9795 Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
9796 SmallVectorImpl<Value *> &Ops,
9797 unsigned IntID) {
9798 if (Ops.size() == 3) {
9799 Function *Cntsb = CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
9800 llvm::Value *CntsbCall = Builder.CreateCall(Cntsb, {}, "svlb");
9802 llvm::Value *VecNum = Ops[2];
9803 llvm::Value *MulVL = Builder.CreateMul(CntsbCall, VecNum, "mulvl");
9805 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[1], MulVL);
9806 Ops[0] = Builder.CreateAdd(
9807 Ops[0], Builder.CreateIntCast(VecNum, Int32Ty, true), "tileslice");
9808 Ops.erase(&Ops[2]);
9810 Function *F = CGM.getIntrinsic(IntID, {});
9811 return Builder.CreateCall(F, Ops);
9814 // Splat a scalar operand across the lanes of an SVE vector. This currently
9815 // uses IRBuilder::CreateVectorSplat rather than the sve dup.x intrinsic.
9816 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
9817 return Builder.CreateVectorSplat(
9818 cast<llvm::VectorType>(Ty)->getElementCount(), Scalar);
9821 Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
9822 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
9825 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
9826 // FIXME: For big endian this needs an additional REV, or needs a separate
9827 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
9828 // instruction is defined as 'bitwise' equivalent from memory point of
9829 // view (when storing/reloading), whereas the svreinterpret builtin
9830 // implements bitwise equivalent cast from register point of view.
9831 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
9832 return Builder.CreateBitCast(Val, Ty);
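// Illustrative example: on little-endian targets svreinterpret_s16_s8 thus
// lowers to a plain bitcast from <vscale x 16 x i8> to <vscale x 8 x i16>.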
9835 static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
9836 SmallVectorImpl<Value *> &Ops) {
9837 auto *SplatZero = Constant::getNullValue(Ty);
9838 Ops.insert(Ops.begin(), SplatZero);
9841 static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
9842 SmallVectorImpl<Value *> &Ops) {
9843 auto *SplatUndef = UndefValue::get(Ty);
9844 Ops.insert(Ops.begin(), SplatUndef);
9847 SmallVector<llvm::Type *, 2>
9848 CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
9849 llvm::Type *ResultType,
9850 ArrayRef<Value *> Ops) {
9851 if (TypeFlags.isOverloadNone())
9852 return {};
9854 llvm::Type *DefaultType = getSVEType(TypeFlags);
9856 if (TypeFlags.isOverloadWhile())
9857 return {DefaultType, Ops[1]->getType()};
9859 if (TypeFlags.isOverloadWhileRW())
9860 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
9862 if (TypeFlags.isOverloadCvt())
9863 return {Ops[0]->getType(), Ops.back()->getType()};
9865 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
9866 return {DefaultType};
9869 Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
9870 llvm::Type *Ty,
9871 ArrayRef<Value *> Ops) {
9872 assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) &&
9873 "Expects TypeFlags.isTupleSet() or TypeFlags.isTupleGet()");
9875 unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
9876 auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>(
9877 TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
9878 Value *Idx = ConstantInt::get(CGM.Int64Ty,
9879 I * SingleVecTy->getMinNumElements());
9881 if (TypeFlags.isTupleSet())
9882 return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx);
9883 return Builder.CreateExtractVector(Ty, Ops[0], Idx);
9886 Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
9887 llvm::Type *Ty,
9888 ArrayRef<Value *> Ops) {
9889 assert(TypeFlags.isTupleCreate() && "Expects TypeFlags.isTupleCreate()");
9891 auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
9892 unsigned MinElts = SrcTy->getMinNumElements();
9893 Value *Call = llvm::PoisonValue::get(Ty);
9894 for (unsigned I = 0; I < Ops.size(); I++) {
9895 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9896 Call = Builder.CreateInsertVector(Ty, Call, Ops[I], Idx);
9899 return Call;
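// Illustrative example: an svcreate2 of two <vscale x 4 x i32> parts inserts
// them into a <vscale x 8 x i32> result at element offsets 0 and 4.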
9902 Value *CodeGenFunction::FormSVEBuiltinResult(Value *Call) {
9903 // Multi-vector results are returned as a struct of vectors; combine the parts
9904 // into a single (wide) result vector.
9905 auto *StructTy = dyn_cast<StructType>(Call->getType());
9906 if (!StructTy)
9907 return Call;
9909 auto *VTy = dyn_cast<ScalableVectorType>(StructTy->getTypeAtIndex(0U));
9910 if (!VTy)
9911 return Call;
9912 unsigned N = StructTy->getNumElements();
9914 // We may need to emit a cast to an svbool_t.
9915 bool IsPredTy = VTy->getElementType()->isIntegerTy(1);
9916 unsigned MinElts = IsPredTy ? 16 : VTy->getMinNumElements();
9918 ScalableVectorType *WideVTy =
9919 ScalableVectorType::get(VTy->getElementType(), MinElts * N);
9920 Value *Ret = llvm::PoisonValue::get(WideVTy);
9921 for (unsigned I = 0; I < N; ++I) {
9922 Value *SRet = Builder.CreateExtractValue(Call, I);
9923 assert(SRet->getType() == VTy && "Unexpected type for result value");
9924 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9926 if (IsPredTy)
9927 SRet = EmitSVEPredicateCast(
9928 SRet, ScalableVectorType::get(Builder.getInt1Ty(), 16));
9930 Ret = Builder.CreateInsertVector(WideVTy, Ret, SRet, Idx);
9932 Call = Ret;
9934 return Call;
9937 void CodeGenFunction::GetAArch64SVEProcessedOperands(
9938 unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops,
9939 SVETypeFlags TypeFlags) {
9940 // Find out if any arguments are required to be integer constant expressions.
9941 unsigned ICEArguments = 0;
9942 ASTContext::GetBuiltinTypeError Error;
9943 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9944 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9946 // Tuple set/get only requires one insert/extract vector, which is
9947 // created by EmitSVETupleSetOrGet.
9948 bool IsTupleGetOrSet = TypeFlags.isTupleSet() || TypeFlags.isTupleGet();
9950 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
9951 bool IsICE = ICEArguments & (1 << i);
9952 Value *Arg = EmitScalarExpr(E->getArg(i));
9954 if (IsICE) {
9955 // If this is required to be a constant, constant fold it so that we know
9956 // that the generated intrinsic gets a ConstantInt.
9957 std::optional<llvm::APSInt> Result =
9958 E->getArg(i)->getIntegerConstantExpr(getContext());
9959 assert(Result && "Expected argument to be a constant");
9961 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
9962 // truncate because the immediate has been range checked and no valid
9963 // immediate requires more than a handful of bits.
9964 *Result = Result->extOrTrunc(32);
9965 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
9966 continue;
9969 if (IsTupleGetOrSet || !isa<ScalableVectorType>(Arg->getType())) {
9970 Ops.push_back(Arg);
9971 continue;
9974 auto *VTy = cast<ScalableVectorType>(Arg->getType());
9975 unsigned MinElts = VTy->getMinNumElements();
9976 bool IsPred = VTy->getElementType()->isIntegerTy(1);
9977 unsigned N = (MinElts * VTy->getScalarSizeInBits()) / (IsPred ? 16 : 128);
9979 if (N == 1) {
9980 Ops.push_back(Arg);
9981 continue;
9984 for (unsigned I = 0; I < N; ++I) {
9985 Value *Idx = ConstantInt::get(CGM.Int64Ty, (I * MinElts) / N);
9986 auto *NewVTy =
9987 ScalableVectorType::get(VTy->getElementType(), MinElts / N);
9988 Ops.push_back(Builder.CreateExtractVector(NewVTy, Arg, Idx));
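// Illustrative example: a two-vector tuple argument such as svint32x2_t
// arrives here as <vscale x 8 x i32> (N == 2) and is split into two
// <vscale x 4 x i32> extracts at element offsets 0 and 4.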
9993 Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
9994 const CallExpr *E) {
9995 llvm::Type *Ty = ConvertType(E->getType());
9996 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
9997 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) {
9998 Value *Val = EmitScalarExpr(E->getArg(0));
9999 return EmitSVEReinterpret(Val, Ty);
10002 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
10003 AArch64SVEIntrinsicsProvenSorted);
10005 llvm::SmallVector<Value *, 4> Ops;
10006 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10007 GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
10009 if (TypeFlags.isLoad())
10010 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
10011 TypeFlags.isZExtReturn());
10012 else if (TypeFlags.isStore())
10013 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
10014 else if (TypeFlags.isGatherLoad())
10015 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10016 else if (TypeFlags.isScatterStore())
10017 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10018 else if (TypeFlags.isPrefetch())
10019 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10020 else if (TypeFlags.isGatherPrefetch())
10021 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10022 else if (TypeFlags.isStructLoad())
10023 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10024 else if (TypeFlags.isStructStore())
10025 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10026 else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
10027 return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops);
10028 else if (TypeFlags.isTupleCreate())
10029 return EmitSVETupleCreate(TypeFlags, Ty, Ops);
10030 else if (TypeFlags.isUndef())
10031 return UndefValue::get(Ty);
10032 else if (Builtin->LLVMIntrinsic != 0) {
10033 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
10034 InsertExplicitZeroOperand(Builder, Ty, Ops);
10036 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
10037 InsertExplicitUndefOperand(Builder, Ty, Ops);
10039 // Some ACLE builtins leave out the argument to specify the predicate
10040 // pattern, which is expected to be expanded to an SV_ALL pattern.
10041 if (TypeFlags.isAppendSVALL())
10042 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
10043 if (TypeFlags.isInsertOp1SVALL())
10044 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
10046 // Predicates must match the main datatype.
10047 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
10048 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
10049 if (PredTy->getElementType()->isIntegerTy(1))
10050 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
10052 // Splat scalar operand to vector (intrinsics with _n infix)
10053 if (TypeFlags.hasSplatOperand()) {
10054 unsigned OpNo = TypeFlags.getSplatOperand();
10055 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
10058 if (TypeFlags.isReverseCompare())
10059 std::swap(Ops[1], Ops[2]);
10060 else if (TypeFlags.isReverseUSDOT())
10061 std::swap(Ops[1], Ops[2]);
10062 else if (TypeFlags.isReverseMergeAnyBinOp() &&
10063 TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
10064 std::swap(Ops[1], Ops[2]);
10065 else if (TypeFlags.isReverseMergeAnyAccOp() &&
10066 TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
10067 std::swap(Ops[1], Ops[3]);
10069 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
10070 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
10071 llvm::Type *OpndTy = Ops[1]->getType();
10072 auto *SplatZero = Constant::getNullValue(OpndTy);
10073 Ops[1] = Builder.CreateSelect(Ops[0], Ops[1], SplatZero);
10076 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
10077 getSVEOverloadTypes(TypeFlags, Ty, Ops));
10078 Value *Call = Builder.CreateCall(F, Ops);
10080 // Predicate results must be converted to svbool_t.
10081 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
10082 if (PredTy->getScalarType()->isIntegerTy(1))
10083 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
10085 return FormSVEBuiltinResult(Call);
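// Illustrative example of the generic path above: svadd_s32_z takes the
// MergeZero branch, selecting the inactive lanes of the first data operand
// to zero, and then calls, roughly,
//   @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %pg,
//                                 <vscale x 4 x i32> %sel,
//                                 <vscale x 4 x i32> %op2)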
10088 switch (BuiltinID) {
10089 default:
10090 return nullptr;
10091 case SVE::BI__builtin_sve_svpsel_lane_b8:
10092 case SVE::BI__builtin_sve_svpsel_lane_b16:
10093 case SVE::BI__builtin_sve_svpsel_lane_b32:
10094 case SVE::BI__builtin_sve_svpsel_lane_b64:
10095 case SVE::BI__builtin_sve_svpsel_lane_c8:
10096 case SVE::BI__builtin_sve_svpsel_lane_c16:
10097 case SVE::BI__builtin_sve_svpsel_lane_c32:
10098 case SVE::BI__builtin_sve_svpsel_lane_c64: {
10099 bool IsSVCount = isa<TargetExtType>(Ops[0]->getType());
10100 assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() ==
10101 "aarch64.svcount")) &&
10102 "Unexpected TargetExtType");
10103 auto SVCountTy =
10104 llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
10105 Function *CastFromSVCountF =
10106 CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_to_svbool, SVCountTy);
10107 Function *CastToSVCountF =
10108 CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, SVCountTy);
10110 auto OverloadedTy = getSVEType(SVETypeFlags(Builtin->TypeModifier));
10111 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_psel, OverloadedTy);
10112 llvm::Value *Ops0 =
10113 IsSVCount ? Builder.CreateCall(CastFromSVCountF, Ops[0]) : Ops[0];
10114 llvm::Value *Ops1 = EmitSVEPredicateCast(Ops[1], OverloadedTy);
10115 llvm::Value *PSel = Builder.CreateCall(F, {Ops0, Ops1, Ops[2]});
10116 return IsSVCount ? Builder.CreateCall(CastToSVCountF, PSel) : PSel;
10118 case SVE::BI__builtin_sve_svmov_b_z: {
10119 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
10120 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10121 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
10122 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
10123 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
10126 case SVE::BI__builtin_sve_svnot_b_z: {
10127 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
10128 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10129 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
10130 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
10131 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
10134 case SVE::BI__builtin_sve_svmovlb_u16:
10135 case SVE::BI__builtin_sve_svmovlb_u32:
10136 case SVE::BI__builtin_sve_svmovlb_u64:
10137 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
10139 case SVE::BI__builtin_sve_svmovlb_s16:
10140 case SVE::BI__builtin_sve_svmovlb_s32:
10141 case SVE::BI__builtin_sve_svmovlb_s64:
10142 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
10144 case SVE::BI__builtin_sve_svmovlt_u16:
10145 case SVE::BI__builtin_sve_svmovlt_u32:
10146 case SVE::BI__builtin_sve_svmovlt_u64:
10147 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
10149 case SVE::BI__builtin_sve_svmovlt_s16:
10150 case SVE::BI__builtin_sve_svmovlt_s32:
10151 case SVE::BI__builtin_sve_svmovlt_s64:
10152 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
10154 case SVE::BI__builtin_sve_svpmullt_u16:
10155 case SVE::BI__builtin_sve_svpmullt_u64:
10156 case SVE::BI__builtin_sve_svpmullt_n_u16:
10157 case SVE::BI__builtin_sve_svpmullt_n_u64:
10158 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
10160 case SVE::BI__builtin_sve_svpmullb_u16:
10161 case SVE::BI__builtin_sve_svpmullb_u64:
10162 case SVE::BI__builtin_sve_svpmullb_n_u16:
10163 case SVE::BI__builtin_sve_svpmullb_n_u64:
10164 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
10166 case SVE::BI__builtin_sve_svdup_n_b8:
10167 case SVE::BI__builtin_sve_svdup_n_b16:
10168 case SVE::BI__builtin_sve_svdup_n_b32:
10169 case SVE::BI__builtin_sve_svdup_n_b64: {
10170 Value *CmpNE =
10171 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
10172 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
10173 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
10174 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
10177 case SVE::BI__builtin_sve_svdupq_n_b8:
10178 case SVE::BI__builtin_sve_svdupq_n_b16:
10179 case SVE::BI__builtin_sve_svdupq_n_b32:
10180 case SVE::BI__builtin_sve_svdupq_n_b64:
10181 case SVE::BI__builtin_sve_svdupq_n_u8:
10182 case SVE::BI__builtin_sve_svdupq_n_s8:
10183 case SVE::BI__builtin_sve_svdupq_n_u64:
10184 case SVE::BI__builtin_sve_svdupq_n_f64:
10185 case SVE::BI__builtin_sve_svdupq_n_s64:
10186 case SVE::BI__builtin_sve_svdupq_n_u16:
10187 case SVE::BI__builtin_sve_svdupq_n_f16:
10188 case SVE::BI__builtin_sve_svdupq_n_bf16:
10189 case SVE::BI__builtin_sve_svdupq_n_s16:
10190 case SVE::BI__builtin_sve_svdupq_n_u32:
10191 case SVE::BI__builtin_sve_svdupq_n_f32:
10192 case SVE::BI__builtin_sve_svdupq_n_s32: {
10193 // These builtins are implemented by materializing the scalar operands as a
10194 // fixed-length vector and replicating its 128-bit quadword with dupq_lane.
10195 unsigned NumOpnds = Ops.size();
10197 bool IsBoolTy =
10198 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
10200 // For svdupq_n_b* the element type is an integer of width 128/numelts,
10201 // so that the compare can use the width that is natural for the expected
10202 // number of predicate lanes.
10203 llvm::Type *EltTy = Ops[0]->getType();
10204 if (IsBoolTy)
10205 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
10207 SmallVector<llvm::Value *, 16> VecOps;
10208 for (unsigned I = 0; I < NumOpnds; ++I)
10209 VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
10210 Value *Vec = BuildVector(VecOps);
10212 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
10213 Value *InsertSubVec = Builder.CreateInsertVector(
10214 OverloadedTy, PoisonValue::get(OverloadedTy), Vec, Builder.getInt64(0));
10216 Function *F =
10217 CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
10218 Value *DupQLane =
10219 Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
10221 if (!IsBoolTy)
10222 return DupQLane;
10224 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10225 Value *Pred = EmitSVEAllTruePred(TypeFlags);
10227 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
10228 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
10229 : Intrinsic::aarch64_sve_cmpne_wide,
10230 OverloadedTy);
10231 Value *Call = Builder.CreateCall(
10232 F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
10233 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
10236 case SVE::BI__builtin_sve_svpfalse_b:
10237 return ConstantInt::getFalse(Ty);
10239 case SVE::BI__builtin_sve_svpfalse_c: {
10240 auto SVBoolTy = ScalableVectorType::get(Builder.getInt1Ty(), 16);
10241 Function *CastToSVCountF =
10242 CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, Ty);
10243 return Builder.CreateCall(CastToSVCountF, ConstantInt::getFalse(SVBoolTy));
10246 case SVE::BI__builtin_sve_svlen_bf16:
10247 case SVE::BI__builtin_sve_svlen_f16:
10248 case SVE::BI__builtin_sve_svlen_f32:
10249 case SVE::BI__builtin_sve_svlen_f64:
10250 case SVE::BI__builtin_sve_svlen_s8:
10251 case SVE::BI__builtin_sve_svlen_s16:
10252 case SVE::BI__builtin_sve_svlen_s32:
10253 case SVE::BI__builtin_sve_svlen_s64:
10254 case SVE::BI__builtin_sve_svlen_u8:
10255 case SVE::BI__builtin_sve_svlen_u16:
10256 case SVE::BI__builtin_sve_svlen_u32:
10257 case SVE::BI__builtin_sve_svlen_u64: {
10258 SVETypeFlags TF(Builtin->TypeModifier);
10259 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
10260 auto *NumEls =
10261 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
10263 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
10264 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
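// Illustrative example: svlen_u32 thus becomes a multiply of the constant
// minimum lane count (4) by the result of llvm.vscale.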
10267 case SVE::BI__builtin_sve_svtbl2_u8:
10268 case SVE::BI__builtin_sve_svtbl2_s8:
10269 case SVE::BI__builtin_sve_svtbl2_u16:
10270 case SVE::BI__builtin_sve_svtbl2_s16:
10271 case SVE::BI__builtin_sve_svtbl2_u32:
10272 case SVE::BI__builtin_sve_svtbl2_s32:
10273 case SVE::BI__builtin_sve_svtbl2_u64:
10274 case SVE::BI__builtin_sve_svtbl2_s64:
10275 case SVE::BI__builtin_sve_svtbl2_f16:
10276 case SVE::BI__builtin_sve_svtbl2_bf16:
10277 case SVE::BI__builtin_sve_svtbl2_f32:
10278 case SVE::BI__builtin_sve_svtbl2_f64: {
10279 SVETypeFlags TF(Builtin->TypeModifier);
10280 auto VTy = cast<llvm::ScalableVectorType>(getSVEType(TF));
10281 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
10282 return Builder.CreateCall(F, Ops);
10285 case SVE::BI__builtin_sve_svset_neonq_s8:
10286 case SVE::BI__builtin_sve_svset_neonq_s16:
10287 case SVE::BI__builtin_sve_svset_neonq_s32:
10288 case SVE::BI__builtin_sve_svset_neonq_s64:
10289 case SVE::BI__builtin_sve_svset_neonq_u8:
10290 case SVE::BI__builtin_sve_svset_neonq_u16:
10291 case SVE::BI__builtin_sve_svset_neonq_u32:
10292 case SVE::BI__builtin_sve_svset_neonq_u64:
10293 case SVE::BI__builtin_sve_svset_neonq_f16:
10294 case SVE::BI__builtin_sve_svset_neonq_f32:
10295 case SVE::BI__builtin_sve_svset_neonq_f64:
10296 case SVE::BI__builtin_sve_svset_neonq_bf16: {
10297 return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
10300 case SVE::BI__builtin_sve_svget_neonq_s8:
10301 case SVE::BI__builtin_sve_svget_neonq_s16:
10302 case SVE::BI__builtin_sve_svget_neonq_s32:
10303 case SVE::BI__builtin_sve_svget_neonq_s64:
10304 case SVE::BI__builtin_sve_svget_neonq_u8:
10305 case SVE::BI__builtin_sve_svget_neonq_u16:
10306 case SVE::BI__builtin_sve_svget_neonq_u32:
10307 case SVE::BI__builtin_sve_svget_neonq_u64:
10308 case SVE::BI__builtin_sve_svget_neonq_f16:
10309 case SVE::BI__builtin_sve_svget_neonq_f32:
10310 case SVE::BI__builtin_sve_svget_neonq_f64:
10311 case SVE::BI__builtin_sve_svget_neonq_bf16: {
10312 return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
10315 case SVE::BI__builtin_sve_svdup_neonq_s8:
10316 case SVE::BI__builtin_sve_svdup_neonq_s16:
10317 case SVE::BI__builtin_sve_svdup_neonq_s32:
10318 case SVE::BI__builtin_sve_svdup_neonq_s64:
10319 case SVE::BI__builtin_sve_svdup_neonq_u8:
10320 case SVE::BI__builtin_sve_svdup_neonq_u16:
10321 case SVE::BI__builtin_sve_svdup_neonq_u32:
10322 case SVE::BI__builtin_sve_svdup_neonq_u64:
10323 case SVE::BI__builtin_sve_svdup_neonq_f16:
10324 case SVE::BI__builtin_sve_svdup_neonq_f32:
10325 case SVE::BI__builtin_sve_svdup_neonq_f64:
10326 case SVE::BI__builtin_sve_svdup_neonq_bf16: {
10327 Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
10328 Builder.getInt64(0));
10329 return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
10330 {Insert, Builder.getInt64(0)});
10334 /// Should not happen
10335 return nullptr;
10338 Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
10339 const CallExpr *E) {
10340 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID,
10341 AArch64SMEIntrinsicsProvenSorted);
10343 llvm::SmallVector<Value *, 4> Ops;
10344 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10345 GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
10347 if (TypeFlags.isLoad() || TypeFlags.isStore())
10348 return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10349 else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
10350 return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10351 else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
10352 BuiltinID == SME::BI__builtin_sme_svzero_za)
10353 return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10354 else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
10355 BuiltinID == SME::BI__builtin_sme_svstr_vnum_za ||
10356 BuiltinID == SME::BI__builtin_sme_svldr_za ||
10357 BuiltinID == SME::BI__builtin_sme_svstr_za)
10358 return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10360 // Should not happen!
10361 if (Builtin->LLVMIntrinsic == 0)
10362 return nullptr;
10364 // Predicates must match the main datatype.
10365 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
10366 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
10367 if (PredTy->getElementType()->isIntegerTy(1))
10368 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
10370 Function *F =
10371 TypeFlags.isOverloadNone()
10372 ? CGM.getIntrinsic(Builtin->LLVMIntrinsic)
10373 : CGM.getIntrinsic(Builtin->LLVMIntrinsic, {getSVEType(TypeFlags)});
10374 Value *Call = Builder.CreateCall(F, Ops);
10376 return FormSVEBuiltinResult(Call);
10379 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
10380 const CallExpr *E,
10381 llvm::Triple::ArchType Arch) {
10382 if (BuiltinID >= clang::AArch64::FirstSVEBuiltin &&
10383 BuiltinID <= clang::AArch64::LastSVEBuiltin)
10384 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
10386 if (BuiltinID >= clang::AArch64::FirstSMEBuiltin &&
10387 BuiltinID <= clang::AArch64::LastSMEBuiltin)
10388 return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
10390 unsigned HintID = static_cast<unsigned>(-1);
10391 switch (BuiltinID) {
10392 default: break;
10393 case clang::AArch64::BI__builtin_arm_nop:
10394 HintID = 0;
10395 break;
10396 case clang::AArch64::BI__builtin_arm_yield:
10397 case clang::AArch64::BI__yield:
10398 HintID = 1;
10399 break;
10400 case clang::AArch64::BI__builtin_arm_wfe:
10401 case clang::AArch64::BI__wfe:
10402 HintID = 2;
10403 break;
10404 case clang::AArch64::BI__builtin_arm_wfi:
10405 case clang::AArch64::BI__wfi:
10406 HintID = 3;
10407 break;
10408 case clang::AArch64::BI__builtin_arm_sev:
10409 case clang::AArch64::BI__sev:
10410 HintID = 4;
10411 break;
10412 case clang::AArch64::BI__builtin_arm_sevl:
10413 case clang::AArch64::BI__sevl:
10414 HintID = 5;
10415 break;
10418 if (HintID != static_cast<unsigned>(-1)) {
10419 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
10420 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
10423 if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
10424 assert((getContext().getTypeSize(E->getType()) == 32) &&
10425 "rbit of unusual size!");
10426 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10427 return Builder.CreateCall(
10428 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
10430 if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
10431 assert((getContext().getTypeSize(E->getType()) == 64) &&
10432 "rbit of unusual size!");
10433 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10434 return Builder.CreateCall(
10435 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
10438 if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
10439 BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
10440 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10441 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
10442 Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
10443 if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
10444 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
10445 return Res;
10448 if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
10449 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10450 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
10451 "cls");
10453 if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
10454 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10455 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
10456 "cls");
10459 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
10460 BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
10461 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10462 llvm::Type *Ty = Arg->getType();
10463 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
10464 Arg, "frint32z");
10467 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
10468 BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
10469 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10470 llvm::Type *Ty = Arg->getType();
10471 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
10472 Arg, "frint64z");
10475 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
10476 BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
10477 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10478 llvm::Type *Ty = Arg->getType();
10479 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
10480 Arg, "frint32x");
10483 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
10484 BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
10485 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10486 llvm::Type *Ty = Arg->getType();
10487 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
10488 Arg, "frint64x");
10491 if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
10492 assert((getContext().getTypeSize(E->getType()) == 32) &&
10493 "__jcvt of unusual size!");
10494 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10495 return Builder.CreateCall(
10496 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
10499 if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
10500 BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
10501 BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
10502 BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
10503 llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
10504 llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
10506 if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
10507 // Load from the address via an LLVM intrinsic, receiving a
10508 // tuple of 8 i64 words, and store each one to ValPtr.
10509 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
10510 llvm::Value *Val = Builder.CreateCall(F, MemAddr);
10511 llvm::Value *ToRet;
10512 for (size_t i = 0; i < 8; i++) {
10513 llvm::Value *ValOffsetPtr =
10514 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
10515 Address Addr =
10516 Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
10517 ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
10519 return ToRet;
10520 } else {
10521 // Load 8 i64 words from ValPtr, and store them to the address
10522 // via an LLVM intrinsic.
10523 SmallVector<llvm::Value *, 9> Args;
10524 Args.push_back(MemAddr);
10525 for (size_t i = 0; i < 8; i++) {
10526 llvm::Value *ValOffsetPtr =
10527 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
10528 Address Addr =
10529 Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
10530 Args.push_back(Builder.CreateLoad(Addr));
10533 auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
10534 ? Intrinsic::aarch64_st64b
10535 : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
10536 ? Intrinsic::aarch64_st64bv
10537 : Intrinsic::aarch64_st64bv0);
10538 Function *F = CGM.getIntrinsic(Intr);
10539 return Builder.CreateCall(F, Args);
10543 if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
10544 BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
10546 auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
10547 ? Intrinsic::aarch64_rndr
10548 : Intrinsic::aarch64_rndrrs);
10549 Function *F = CGM.getIntrinsic(Intr);
10550 llvm::Value *Val = Builder.CreateCall(F);
10551 Value *RandomValue = Builder.CreateExtractValue(Val, 0);
10552 Value *Status = Builder.CreateExtractValue(Val, 1);
10554 Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
10555 Builder.CreateStore(RandomValue, MemAddress);
10556 Status = Builder.CreateZExt(Status, Int32Ty);
10557 return Status;
10560 if (BuiltinID == clang::AArch64::BI__clear_cache) {
10561 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
10562 const FunctionDecl *FD = E->getDirectCallee();
10563 Value *Ops[2];
10564 for (unsigned i = 0; i < 2; i++)
10565 Ops[i] = EmitScalarExpr(E->getArg(i));
10566 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
10567 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
10568 StringRef Name = FD->getName();
10569 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
10572 if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
10573 BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
10574 getContext().getTypeSize(E->getType()) == 128) {
10575 Function *F =
10576 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
10577 ? Intrinsic::aarch64_ldaxp
10578 : Intrinsic::aarch64_ldxp);
10580 Value *LdPtr = EmitScalarExpr(E->getArg(0));
10581 Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");
10583 Value *Val0 = Builder.CreateExtractValue(Val, 1);
10584 Value *Val1 = Builder.CreateExtractValue(Val, 0);
10585 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
10586 Val0 = Builder.CreateZExt(Val0, Int128Ty);
10587 Val1 = Builder.CreateZExt(Val1, Int128Ty);
10589 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
10590 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
10591 Val = Builder.CreateOr(Val, Val1);
10592 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
10593 } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
10594 BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
10595 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
10597 QualType Ty = E->getType();
10598 llvm::Type *RealResTy = ConvertType(Ty);
10599 llvm::Type *IntTy =
10600 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
10602 Function *F =
10603 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
10604 ? Intrinsic::aarch64_ldaxr
10605 : Intrinsic::aarch64_ldxr,
10606 UnqualPtrTy);
10607 CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
10608 Val->addParamAttr(
10609 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
10611 if (RealResTy->isPointerTy())
10612 return Builder.CreateIntToPtr(Val, RealResTy);
10614 llvm::Type *IntResTy = llvm::IntegerType::get(
10615 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
10616 return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
10617 RealResTy);
10620 if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
10621 BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
10622 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
10623 Function *F =
10624 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
10625 ? Intrinsic::aarch64_stlxp
10626 : Intrinsic::aarch64_stxp);
10627 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
10629 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
10630 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
10632 Tmp = Tmp.withElementType(STy);
10633 llvm::Value *Val = Builder.CreateLoad(Tmp);
10635 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
10636 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
10637 Value *StPtr = EmitScalarExpr(E->getArg(1));
10638 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
10641 if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
10642 BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
10643 Value *StoreVal = EmitScalarExpr(E->getArg(0));
10644 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
10646 QualType Ty = E->getArg(0)->getType();
10647 llvm::Type *StoreTy =
10648 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
10650 if (StoreVal->getType()->isPointerTy())
10651 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
10652 else {
10653 llvm::Type *IntTy = llvm::IntegerType::get(
10654 getLLVMContext(),
10655 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
10656 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
10657 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
10660 Function *F =
10661 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
10662 ? Intrinsic::aarch64_stlxr
10663 : Intrinsic::aarch64_stxr,
10664 StoreAddr->getType());
10665 CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
10666 CI->addParamAttr(
10667 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
10668 return CI;
10671 if (BuiltinID == clang::AArch64::BI__getReg) {
10672 Expr::EvalResult Result;
10673 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
10674 llvm_unreachable("Sema will ensure that the parameter is constant");
10676 llvm::APSInt Value = Result.Val.getInt();
10677 LLVMContext &Context = CGM.getLLVMContext();
10678 std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
10680 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
10681 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10682 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10684 llvm::Function *F =
10685 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10686 return Builder.CreateCall(F, Metadata);
10689 if (BuiltinID == clang::AArch64::BI__break) {
10690 Expr::EvalResult Result;
10691 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
10692 llvm_unreachable("Sema will ensure that the parameter is constant");
10694 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
10695 return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
10698 if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
10699 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
10700 return Builder.CreateCall(F);
10703 if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
10704 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
10705 llvm::SyncScope::SingleThread);
10707 // CRC32
10708 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
10709 switch (BuiltinID) {
10710 case clang::AArch64::BI__builtin_arm_crc32b:
10711 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
10712 case clang::AArch64::BI__builtin_arm_crc32cb:
10713 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
10714 case clang::AArch64::BI__builtin_arm_crc32h:
10715 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
10716 case clang::AArch64::BI__builtin_arm_crc32ch:
10717 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
10718 case clang::AArch64::BI__builtin_arm_crc32w:
10719 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
10720 case clang::AArch64::BI__builtin_arm_crc32cw:
10721 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
10722 case clang::AArch64::BI__builtin_arm_crc32d:
10723 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
10724 case clang::AArch64::BI__builtin_arm_crc32cd:
10725 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
10728 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
10729 Value *Arg0 = EmitScalarExpr(E->getArg(0));
10730 Value *Arg1 = EmitScalarExpr(E->getArg(1));
10731 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
10733 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
10734 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
10736 return Builder.CreateCall(F, {Arg0, Arg1});
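// Illustrative example: __builtin_arm_crc32b's uint8_t data argument is
// zero-extended above to the width of the intrinsic's second parameter
// before the call is emitted.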
10739 // Memory Operations (MOPS)
10740 if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
10741 Value *Dst = EmitScalarExpr(E->getArg(0));
10742 Value *Val = EmitScalarExpr(E->getArg(1));
10743 Value *Size = EmitScalarExpr(E->getArg(2));
10744 Dst = Builder.CreatePointerCast(Dst, Int8PtrTy);
10745 Val = Builder.CreateTrunc(Val, Int8Ty);
10746 Size = Builder.CreateIntCast(Size, Int64Ty, false);
10747 return Builder.CreateCall(
10748 CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
10751 // Memory Tagging Extensions (MTE) Intrinsics
10752 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
10753 switch (BuiltinID) {
10754 case clang::AArch64::BI__builtin_arm_irg:
10755 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
10756 case clang::AArch64::BI__builtin_arm_addg:
10757 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
10758 case clang::AArch64::BI__builtin_arm_gmi:
10759 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
10760 case clang::AArch64::BI__builtin_arm_ldg:
10761 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
10762 case clang::AArch64::BI__builtin_arm_stg:
10763 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
10764 case clang::AArch64::BI__builtin_arm_subp:
10765 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
10768 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
10769 llvm::Type *T = ConvertType(E->getType());
10771 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
10772 Value *Pointer = EmitScalarExpr(E->getArg(0));
10773 Value *Mask = EmitScalarExpr(E->getArg(1));
10775 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10776 Mask = Builder.CreateZExt(Mask, Int64Ty);
10777 Value *RV = Builder.CreateCall(
10778 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
10779 return Builder.CreatePointerCast(RV, T);
10781 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
10782 Value *Pointer = EmitScalarExpr(E->getArg(0));
10783 Value *TagOffset = EmitScalarExpr(E->getArg(1));
10785 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10786 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
10787 Value *RV = Builder.CreateCall(
10788 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
10789 return Builder.CreatePointerCast(RV, T);
10791 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
10792 Value *Pointer = EmitScalarExpr(E->getArg(0));
10793 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
10795 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
10796 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10797 return Builder.CreateCall(
10798 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
10800 // Although it is possible to supply a different return address (the
10801 // first argument) to this intrinsic, for now we set the return address
10802 // to be the same as the input address.
10803 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
10804 Value *TagAddress = EmitScalarExpr(E->getArg(0));
10805 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
10806 Value *RV = Builder.CreateCall(
10807 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
10808 return Builder.CreatePointerCast(RV, T);
10810 // Although it is possible to supply a different tag (to set) to this
10811 // intrinsic (as the first argument), for now we supply the tag that is
10812 // in the input address argument (the common use case).
10813 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
10814 Value *TagAddress = EmitScalarExpr(E->getArg(0));
10815 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
10816 return Builder.CreateCall(
10817 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
10819 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
10820 Value *PointerA = EmitScalarExpr(E->getArg(0));
10821 Value *PointerB = EmitScalarExpr(E->getArg(1));
10822 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
10823 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
10824 return Builder.CreateCall(
10825 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
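  // Illustrative only (assumed lowering): e.g. __builtin_arm_irg(p, m) becomes
  //   %t = call ptr @llvm.aarch64.irg(ptr %p, i64 %m)
  // and __builtin_arm_subp(a, b) becomes call i64 @llvm.aarch64.subp(ptr %a, ptr %b).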
10829 if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10830 BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
10831 BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10832 BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
10833 BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
10834 BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
10835 BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
10836 BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {
10838 SpecialRegisterAccessKind AccessKind = Write;
10839 if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10840 BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
10841 BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10842 BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
10843 AccessKind = VolatileRead;
10845 bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
10846 BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;
10848 bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10849 BuiltinID == clang::AArch64::BI__builtin_arm_wsr;
10851 bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10852 BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;
10854 llvm::Type *ValueType;
10855 llvm::Type *RegisterType = Int64Ty;
10856 if (Is32Bit) {
10857 ValueType = Int32Ty;
10858 } else if (Is128Bit) {
10859 llvm::Type *Int128Ty =
10860 llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
10861 ValueType = Int128Ty;
10862 RegisterType = Int128Ty;
10863 } else if (IsPointerBuiltin) {
10864 ValueType = VoidPtrTy;
10865 } else {
10866 ValueType = Int64Ty;
10869 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
10870 AccessKind);
10873 if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
10874 BuiltinID == clang::AArch64::BI_WriteStatusReg) {
10875 LLVMContext &Context = CGM.getLLVMContext();
10877 unsigned SysReg =
10878 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
10880 std::string SysRegStr;
10881 llvm::raw_string_ostream(SysRegStr) <<
10882 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
10883 ((SysReg >> 11) & 7) << ":" <<
10884 ((SysReg >> 7) & 15) << ":" <<
10885 ((SysReg >> 3) & 15) << ":" <<
10886 ( SysReg & 7);
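  // Illustrative only: the 16-bit MSVC encoding unpacks to "op0:op1:CRn:CRm:op2",
  // with op0 rebuilt as (2 | bit 14) since only op0 values 2 and 3 are encodable;
  // e.g. SysReg == 0x7fff produces the register string "3:7:15:15:7".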
10888 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
10889 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10890 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10892 llvm::Type *RegisterType = Int64Ty;
10893 llvm::Type *Types[] = { RegisterType };
10895 if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
10896 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
10898 return Builder.CreateCall(F, Metadata);
10901 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
10902 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
10904 return Builder.CreateCall(F, { Metadata, ArgValue });
10907 if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
10908 llvm::Function *F =
10909 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
10910 return Builder.CreateCall(F);
10913 if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
10914 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
10915 return Builder.CreateCall(F);
10918 if (BuiltinID == clang::AArch64::BI__mulh ||
10919 BuiltinID == clang::AArch64::BI__umulh) {
10920 llvm::Type *ResType = ConvertType(E->getType());
10921 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
10923 bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
10924 Value *LHS =
10925 Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
10926 Value *RHS =
10927 Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);
10929 Value *MulResult, *HigherBits;
10930 if (IsSigned) {
10931 MulResult = Builder.CreateNSWMul(LHS, RHS);
10932 HigherBits = Builder.CreateAShr(MulResult, 64);
10933 } else {
10934 MulResult = Builder.CreateNUWMul(LHS, RHS);
10935 HigherBits = Builder.CreateLShr(MulResult, 64);
10937 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
10939 return HigherBits;
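  // Illustrative only: both operands are widened to i128, multiplied, and the
  // upper 64 bits are returned; e.g. __umulh(1ULL << 32, 1ULL << 32) == 1.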
10942 if (BuiltinID == AArch64::BI__writex18byte ||
10943 BuiltinID == AArch64::BI__writex18word ||
10944 BuiltinID == AArch64::BI__writex18dword ||
10945 BuiltinID == AArch64::BI__writex18qword) {
10946 // Read x18 as i8*
10947 LLVMContext &Context = CGM.getLLVMContext();
10948 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
10949 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10950 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10951 llvm::Function *F =
10952 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10953 llvm::Value *X18 = Builder.CreateCall(F, Metadata);
10954 X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
10956 // Store val at x18 + offset
10957 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
10958 Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
10959 Value *Val = EmitScalarExpr(E->getArg(1));
10960 StoreInst *Store = Builder.CreateAlignedStore(Val, Ptr, CharUnits::One());
10961 return Store;
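  // Illustrative only: __writex18byte(offset, v) reads x18 via
  // llvm.read_register.i64, forms the address x18 + offset with a byte GEP, and
  // performs a 1-byte-aligned store; the word/dword/qword variants differ only
  // in the width of the stored value.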
10964 if (BuiltinID == AArch64::BI__readx18byte ||
10965 BuiltinID == AArch64::BI__readx18word ||
10966 BuiltinID == AArch64::BI__readx18dword ||
10967 BuiltinID == AArch64::BI__readx18qword) {
10968 llvm::Type *IntTy = ConvertType(E->getType());
10970 // Read x18 as i8*
10971 LLVMContext &Context = CGM.getLLVMContext();
10972 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
10973 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10974 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10975 llvm::Function *F =
10976 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10977 llvm::Value *X18 = Builder.CreateCall(F, Metadata);
10978 X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
10980 // Load x18 + offset
10981 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
10982 Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
10983 LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
10984 return Load;
10987 if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 ||
10988 BuiltinID == AArch64::BI_CopyFloatFromInt32 ||
10989 BuiltinID == AArch64::BI_CopyInt32FromFloat ||
10990 BuiltinID == AArch64::BI_CopyInt64FromDouble) {
10991 Value *Arg = EmitScalarExpr(E->getArg(0));
10992 llvm::Type *RetTy = ConvertType(E->getType());
10993 return Builder.CreateBitCast(Arg, RetTy);
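  // Illustrative only: these are pure bit reinterpretations,
  // e.g. _CopyFloatFromInt32(0x3f800000) == 1.0f.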
10996 if (BuiltinID == AArch64::BI_CountLeadingOnes ||
10997 BuiltinID == AArch64::BI_CountLeadingOnes64 ||
10998 BuiltinID == AArch64::BI_CountLeadingZeros ||
10999 BuiltinID == AArch64::BI_CountLeadingZeros64) {
11000 Value *Arg = EmitScalarExpr(E->getArg(0));
11001 llvm::Type *ArgType = Arg->getType();
11003 if (BuiltinID == AArch64::BI_CountLeadingOnes ||
11004 BuiltinID == AArch64::BI_CountLeadingOnes64)
11005 Arg = Builder.CreateXor(Arg, Constant::getAllOnesValue(ArgType));
11007 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
11008 Value *Result = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
11010 if (BuiltinID == AArch64::BI_CountLeadingOnes64 ||
11011 BuiltinID == AArch64::BI_CountLeadingZeros64)
11012 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11013 return Result;
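  // Illustrative only: _CountLeadingZeros(1) == 31, and the *Ones forms count
  // leading set bits by inverting first, e.g. _CountLeadingOnes(0xf0000000) == 4.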
11016 if (BuiltinID == AArch64::BI_CountLeadingSigns ||
11017 BuiltinID == AArch64::BI_CountLeadingSigns64) {
11018 Value *Arg = EmitScalarExpr(E->getArg(0));
11020 Function *F = (BuiltinID == AArch64::BI_CountLeadingSigns)
11021 ? CGM.getIntrinsic(Intrinsic::aarch64_cls)
11022 : CGM.getIntrinsic(Intrinsic::aarch64_cls64);
11024 Value *Result = Builder.CreateCall(F, Arg, "cls");
11025 if (BuiltinID == AArch64::BI_CountLeadingSigns64)
11026 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11027 return Result;
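  // Illustrative only: CLS excludes the sign bit itself, so for 32-bit inputs
  // _CountLeadingSigns(0) == _CountLeadingSigns(-1) == 31.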
11030 if (BuiltinID == AArch64::BI_CountOneBits ||
11031 BuiltinID == AArch64::BI_CountOneBits64) {
11032 Value *ArgValue = EmitScalarExpr(E->getArg(0));
11033 llvm::Type *ArgType = ArgValue->getType();
11034 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
11036 Value *Result = Builder.CreateCall(F, ArgValue);
11037 if (BuiltinID == AArch64::BI_CountOneBits64)
11038 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11039 return Result;
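  // Illustrative only: this is a population count, e.g. _CountOneBits(0xf0f0) == 8;
  // the 64-bit variant truncates the ctpop result back to 32 bits.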
11042 if (BuiltinID == AArch64::BI__prefetch) {
11043 Value *Address = EmitScalarExpr(E->getArg(0));
11044 Value *RW = llvm::ConstantInt::get(Int32Ty, 0);
11045 Value *Locality = ConstantInt::get(Int32Ty, 3);
11046 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
11047 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
11048 return Builder.CreateCall(F, {Address, RW, Locality, Data});
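  // Illustrative only (assumed lowering): __prefetch(p) becomes roughly
  //   call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)
  // i.e. a read prefetch with maximal temporal locality into the data cache.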
11051 // Handle MSVC intrinsics before argument evaluation to prevent double
11052 // evaluation.
11053 if (std::optional<MSVCIntrin> MsvcIntId =
11054 translateAarch64ToMsvcIntrin(BuiltinID))
11055 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
11057 // Some intrinsics are equivalent - if they are, use the base intrinsic ID.
11058 auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
11059 return P.first == BuiltinID;
11061 if (It != end(NEONEquivalentIntrinsicMap))
11062 BuiltinID = It->second;
11064 // Find out if any arguments are required to be integer constant
11065 // expressions.
11066 unsigned ICEArguments = 0;
11067 ASTContext::GetBuiltinTypeError Error;
11068 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
11069 assert(Error == ASTContext::GE_None && "Should not codegen an error");
11071 llvm::SmallVector<Value*, 4> Ops;
11072 Address PtrOp0 = Address::invalid();
11073 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
11074 if (i == 0) {
11075 switch (BuiltinID) {
11076 case NEON::BI__builtin_neon_vld1_v:
11077 case NEON::BI__builtin_neon_vld1q_v:
11078 case NEON::BI__builtin_neon_vld1_dup_v:
11079 case NEON::BI__builtin_neon_vld1q_dup_v:
11080 case NEON::BI__builtin_neon_vld1_lane_v:
11081 case NEON::BI__builtin_neon_vld1q_lane_v:
11082 case NEON::BI__builtin_neon_vst1_v:
11083 case NEON::BI__builtin_neon_vst1q_v:
11084 case NEON::BI__builtin_neon_vst1_lane_v:
11085 case NEON::BI__builtin_neon_vst1q_lane_v:
11086 case NEON::BI__builtin_neon_vldap1_lane_s64:
11087 case NEON::BI__builtin_neon_vldap1q_lane_s64:
11088 case NEON::BI__builtin_neon_vstl1_lane_s64:
11089 case NEON::BI__builtin_neon_vstl1q_lane_s64:
11090 // Get the alignment for the argument in addition to the value;
11091 // we'll use it later.
11092 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
11093 Ops.push_back(PtrOp0.getPointer());
11094 continue;
11097 if ((ICEArguments & (1 << i)) == 0) {
11098 Ops.push_back(EmitScalarExpr(E->getArg(i)));
11099 } else {
11100 // If this is required to be a constant, constant fold it so that we know
11101 // that the generated intrinsic gets a ConstantInt.
11102 Ops.push_back(llvm::ConstantInt::get(
11103 getLLVMContext(),
11104 *E->getArg(i)->getIntegerConstantExpr(getContext())));
11108 auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
11109 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
11110 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
11112 if (Builtin) {
11113 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
11114 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
11115 assert(Result && "SISD intrinsic should have been handled");
11116 return Result;
11119 const Expr *Arg = E->getArg(E->getNumArgs()-1);
11120 NeonTypeFlags Type(0);
11121 if (std::optional<llvm::APSInt> Result =
11122 Arg->getIntegerConstantExpr(getContext()))
11123 // Determine the type of this overloaded NEON intrinsic.
11124 Type = NeonTypeFlags(Result->getZExtValue());
11126 bool usgn = Type.isUnsigned();
11127 bool quad = Type.isQuad();
11129 // Handle non-overloaded intrinsics first.
11130 switch (BuiltinID) {
11131 default: break;
11132 case NEON::BI__builtin_neon_vabsh_f16:
11133 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11134 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
11135 case NEON::BI__builtin_neon_vaddq_p128: {
11136 llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
11137 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11138 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11139 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11140 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
11141 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
11142 return Builder.CreateBitCast(Ops[0], Int128Ty);
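  // Illustrative only: polynomial addition over GF(2) is plain XOR, so
  // vaddq_p128(a, b) is emitted as a 128-bit xor rather than an intrinsic call.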
11144 case NEON::BI__builtin_neon_vldrq_p128: {
11145 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
11146 Value *Ptr = EmitScalarExpr(E->getArg(0));
11147 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
11148 CharUnits::fromQuantity(16));
11150 case NEON::BI__builtin_neon_vstrq_p128: {
11151 Value *Ptr = Ops[0];
11152 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
11154 case NEON::BI__builtin_neon_vcvts_f32_u32:
11155 case NEON::BI__builtin_neon_vcvtd_f64_u64:
11156 usgn = true;
11157 [[fallthrough]];
11158 case NEON::BI__builtin_neon_vcvts_f32_s32:
11159 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
11160 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11161 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
11162 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
11163 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
11164 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
11165 if (usgn)
11166 return Builder.CreateUIToFP(Ops[0], FTy);
11167 return Builder.CreateSIToFP(Ops[0], FTy);
11169 case NEON::BI__builtin_neon_vcvth_f16_u16:
11170 case NEON::BI__builtin_neon_vcvth_f16_u32:
11171 case NEON::BI__builtin_neon_vcvth_f16_u64:
11172 usgn = true;
11173 [[fallthrough]];
11174 case NEON::BI__builtin_neon_vcvth_f16_s16:
11175 case NEON::BI__builtin_neon_vcvth_f16_s32:
11176 case NEON::BI__builtin_neon_vcvth_f16_s64: {
11177 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11178 llvm::Type *FTy = HalfTy;
11179 llvm::Type *InTy;
11180 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
11181 InTy = Int64Ty;
11182 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
11183 InTy = Int32Ty;
11184 else
11185 InTy = Int16Ty;
11186 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
11187 if (usgn)
11188 return Builder.CreateUIToFP(Ops[0], FTy);
11189 return Builder.CreateSIToFP(Ops[0], FTy);
11191 case NEON::BI__builtin_neon_vcvtah_u16_f16:
11192 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
11193 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
11194 case NEON::BI__builtin_neon_vcvtph_u16_f16:
11195 case NEON::BI__builtin_neon_vcvth_u16_f16:
11196 case NEON::BI__builtin_neon_vcvtah_s16_f16:
11197 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
11198 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
11199 case NEON::BI__builtin_neon_vcvtph_s16_f16:
11200 case NEON::BI__builtin_neon_vcvth_s16_f16: {
11201 unsigned Int;
11202 llvm::Type* InTy = Int32Ty;
11203 llvm::Type* FTy = HalfTy;
11204 llvm::Type *Tys[2] = {InTy, FTy};
11205 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11206 switch (BuiltinID) {
11207 default: llvm_unreachable("missing builtin ID in switch!");
11208 case NEON::BI__builtin_neon_vcvtah_u16_f16:
11209 Int = Intrinsic::aarch64_neon_fcvtau; break;
11210 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
11211 Int = Intrinsic::aarch64_neon_fcvtmu; break;
11212 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
11213 Int = Intrinsic::aarch64_neon_fcvtnu; break;
11214 case NEON::BI__builtin_neon_vcvtph_u16_f16:
11215 Int = Intrinsic::aarch64_neon_fcvtpu; break;
11216 case NEON::BI__builtin_neon_vcvth_u16_f16:
11217 Int = Intrinsic::aarch64_neon_fcvtzu; break;
11218 case NEON::BI__builtin_neon_vcvtah_s16_f16:
11219 Int = Intrinsic::aarch64_neon_fcvtas; break;
11220 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
11221 Int = Intrinsic::aarch64_neon_fcvtms; break;
11222 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
11223 Int = Intrinsic::aarch64_neon_fcvtns; break;
11224 case NEON::BI__builtin_neon_vcvtph_s16_f16:
11225 Int = Intrinsic::aarch64_neon_fcvtps; break;
11226 case NEON::BI__builtin_neon_vcvth_s16_f16:
11227 Int = Intrinsic::aarch64_neon_fcvtzs; break;
11229 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
11230 return Builder.CreateTrunc(Ops[0], Int16Ty);
11232 case NEON::BI__builtin_neon_vcaleh_f16:
11233 case NEON::BI__builtin_neon_vcalth_f16:
11234 case NEON::BI__builtin_neon_vcageh_f16:
11235 case NEON::BI__builtin_neon_vcagth_f16: {
11236 unsigned Int;
11237 llvm::Type* InTy = Int32Ty;
11238 llvm::Type* FTy = HalfTy;
11239 llvm::Type *Tys[2] = {InTy, FTy};
11240 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11241 switch (BuiltinID) {
11242 default: llvm_unreachable("missing builtin ID in switch!");
11243 case NEON::BI__builtin_neon_vcageh_f16:
11244 Int = Intrinsic::aarch64_neon_facge; break;
11245 case NEON::BI__builtin_neon_vcagth_f16:
11246 Int = Intrinsic::aarch64_neon_facgt; break;
11247 case NEON::BI__builtin_neon_vcaleh_f16:
11248 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
11249 case NEON::BI__builtin_neon_vcalth_f16:
11250 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
11252 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
11253 return Builder.CreateTrunc(Ops[0], Int16Ty);
11255 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
11256 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
11257 unsigned Int;
11258 llvm::Type* InTy = Int32Ty;
11259 llvm::Type* FTy = HalfTy;
11260 llvm::Type *Tys[2] = {InTy, FTy};
11261 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11262 switch (BuiltinID) {
11263 default: llvm_unreachable("missing builtin ID in switch!");
11264 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
11265 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
11266 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
11267 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
11269 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
11270 return Builder.CreateTrunc(Ops[0], Int16Ty);
11272 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
11273 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
11274 unsigned Int;
11275 llvm::Type* FTy = HalfTy;
11276 llvm::Type* InTy = Int32Ty;
11277 llvm::Type *Tys[2] = {FTy, InTy};
11278 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11279 switch (BuiltinID) {
11280 default: llvm_unreachable("missing builtin ID in switch!");
11281 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
11282 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
11283 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
11284 break;
11285 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
11286 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
11287 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
11288 break;
11290 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
11292 case NEON::BI__builtin_neon_vpaddd_s64: {
11293 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
11294 Value *Vec = EmitScalarExpr(E->getArg(0));
11295 // The vector is v2i64, so make sure it's bitcast to that.
11296 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
11297 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
11298 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
11299 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
11300 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
11301 // Pairwise addition of a v2i64 into a scalar i64.
11302 return Builder.CreateAdd(Op0, Op1, "vpaddd");
11304 case NEON::BI__builtin_neon_vpaddd_f64: {
11305 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
11306 Value *Vec = EmitScalarExpr(E->getArg(0));
11307 // The vector is v2f64, so make sure it's bitcast to that.
11308 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
11309 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
11310 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
11311 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
11312 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
11313 // Pairwise addition of a v2f64 into a scalar f64.
11314 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
11316 case NEON::BI__builtin_neon_vpadds_f32: {
11317 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
11318 Value *Vec = EmitScalarExpr(E->getArg(0));
11319 // The vector is v2f32, so make sure it's bitcast to that.
11320 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
11321 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
11322 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
11323 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
11324 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
11325 // Pairwise addition of a v2f32 into a scalar f32.
11326 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
11328 case NEON::BI__builtin_neon_vceqzd_s64:
11329 case NEON::BI__builtin_neon_vceqzd_f64:
11330 case NEON::BI__builtin_neon_vceqzs_f32:
11331 case NEON::BI__builtin_neon_vceqzh_f16:
11332 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11333 return EmitAArch64CompareBuiltinExpr(
11334 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11335 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
11336 case NEON::BI__builtin_neon_vcgezd_s64:
11337 case NEON::BI__builtin_neon_vcgezd_f64:
11338 case NEON::BI__builtin_neon_vcgezs_f32:
11339 case NEON::BI__builtin_neon_vcgezh_f16:
11340 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11341 return EmitAArch64CompareBuiltinExpr(
11342 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11343 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
11344 case NEON::BI__builtin_neon_vclezd_s64:
11345 case NEON::BI__builtin_neon_vclezd_f64:
11346 case NEON::BI__builtin_neon_vclezs_f32:
11347 case NEON::BI__builtin_neon_vclezh_f16:
11348 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11349 return EmitAArch64CompareBuiltinExpr(
11350 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11351 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
11352 case NEON::BI__builtin_neon_vcgtzd_s64:
11353 case NEON::BI__builtin_neon_vcgtzd_f64:
11354 case NEON::BI__builtin_neon_vcgtzs_f32:
11355 case NEON::BI__builtin_neon_vcgtzh_f16:
11356 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11357 return EmitAArch64CompareBuiltinExpr(
11358 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11359 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
11360 case NEON::BI__builtin_neon_vcltzd_s64:
11361 case NEON::BI__builtin_neon_vcltzd_f64:
11362 case NEON::BI__builtin_neon_vcltzs_f32:
11363 case NEON::BI__builtin_neon_vcltzh_f16:
11364 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11365 return EmitAArch64CompareBuiltinExpr(
11366 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11367 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
11369 case NEON::BI__builtin_neon_vceqzd_u64: {
11370 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11371 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11372 Ops[0] =
11373 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
11374 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
11376 case NEON::BI__builtin_neon_vceqd_f64:
11377 case NEON::BI__builtin_neon_vcled_f64:
11378 case NEON::BI__builtin_neon_vcltd_f64:
11379 case NEON::BI__builtin_neon_vcged_f64:
11380 case NEON::BI__builtin_neon_vcgtd_f64: {
11381 llvm::CmpInst::Predicate P;
11382 switch (BuiltinID) {
11383 default: llvm_unreachable("missing builtin ID in switch!");
11384 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
11385 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
11386 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
11387 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
11388 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
11390 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11391 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11392 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
11393 if (P == llvm::FCmpInst::FCMP_OEQ)
11394 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11395 else
11396 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11397 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
11399 case NEON::BI__builtin_neon_vceqs_f32:
11400 case NEON::BI__builtin_neon_vcles_f32:
11401 case NEON::BI__builtin_neon_vclts_f32:
11402 case NEON::BI__builtin_neon_vcges_f32:
11403 case NEON::BI__builtin_neon_vcgts_f32: {
11404 llvm::CmpInst::Predicate P;
11405 switch (BuiltinID) {
11406 default: llvm_unreachable("missing builtin ID in switch!");
11407 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
11408 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
11409 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
11410 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
11411 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
11413 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11414 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
11415 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
11416 if (P == llvm::FCmpInst::FCMP_OEQ)
11417 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11418 else
11419 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11420 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
11422 case NEON::BI__builtin_neon_vceqh_f16:
11423 case NEON::BI__builtin_neon_vcleh_f16:
11424 case NEON::BI__builtin_neon_vclth_f16:
11425 case NEON::BI__builtin_neon_vcgeh_f16:
11426 case NEON::BI__builtin_neon_vcgth_f16: {
11427 llvm::CmpInst::Predicate P;
11428 switch (BuiltinID) {
11429 default: llvm_unreachable("missing builtin ID in switch!");
11430 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
11431 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
11432 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
11433 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
11434 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
11436 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11437 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
11438 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
11439 if (P == llvm::FCmpInst::FCMP_OEQ)
11440 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11441 else
11442 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11443 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
11445 case NEON::BI__builtin_neon_vceqd_s64:
11446 case NEON::BI__builtin_neon_vceqd_u64:
11447 case NEON::BI__builtin_neon_vcgtd_s64:
11448 case NEON::BI__builtin_neon_vcgtd_u64:
11449 case NEON::BI__builtin_neon_vcltd_s64:
11450 case NEON::BI__builtin_neon_vcltd_u64:
11451 case NEON::BI__builtin_neon_vcged_u64:
11452 case NEON::BI__builtin_neon_vcged_s64:
11453 case NEON::BI__builtin_neon_vcled_u64:
11454 case NEON::BI__builtin_neon_vcled_s64: {
11455 llvm::CmpInst::Predicate P;
11456 switch (BuiltinID) {
11457 default: llvm_unreachable("missing builtin ID in switch!");
11458 case NEON::BI__builtin_neon_vceqd_s64:
11459 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
11460 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
11461 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
11462 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
11463 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
11464 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
11465 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
11466 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
11467 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
11469 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11470 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11471 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11472 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
11473 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
11475 case NEON::BI__builtin_neon_vtstd_s64:
11476 case NEON::BI__builtin_neon_vtstd_u64: {
11477 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11478 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11479 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11480 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
11481 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
11482 llvm::Constant::getNullValue(Int64Ty));
11483 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
11485 case NEON::BI__builtin_neon_vset_lane_i8:
11486 case NEON::BI__builtin_neon_vset_lane_i16:
11487 case NEON::BI__builtin_neon_vset_lane_i32:
11488 case NEON::BI__builtin_neon_vset_lane_i64:
11489 case NEON::BI__builtin_neon_vset_lane_bf16:
11490 case NEON::BI__builtin_neon_vset_lane_f32:
11491 case NEON::BI__builtin_neon_vsetq_lane_i8:
11492 case NEON::BI__builtin_neon_vsetq_lane_i16:
11493 case NEON::BI__builtin_neon_vsetq_lane_i32:
11494 case NEON::BI__builtin_neon_vsetq_lane_i64:
11495 case NEON::BI__builtin_neon_vsetq_lane_bf16:
11496 case NEON::BI__builtin_neon_vsetq_lane_f32:
11497 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11498 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11499 case NEON::BI__builtin_neon_vset_lane_f64:
11500 // The vector type needs a cast for the v1f64 variant.
11501 Ops[1] =
11502 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
11503 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11504 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11505 case NEON::BI__builtin_neon_vsetq_lane_f64:
11506 // The vector type needs a cast for the v2f64 variant.
11507 Ops[1] =
11508 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
11509 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11510 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11512 case NEON::BI__builtin_neon_vget_lane_i8:
11513 case NEON::BI__builtin_neon_vdupb_lane_i8:
11514 Ops[0] =
11515 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
11516 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11517 "vget_lane");
11518 case NEON::BI__builtin_neon_vgetq_lane_i8:
11519 case NEON::BI__builtin_neon_vdupb_laneq_i8:
11520 Ops[0] =
11521 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
11522 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11523 "vgetq_lane");
11524 case NEON::BI__builtin_neon_vget_lane_i16:
11525 case NEON::BI__builtin_neon_vduph_lane_i16:
11526 Ops[0] =
11527 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
11528 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11529 "vget_lane");
11530 case NEON::BI__builtin_neon_vgetq_lane_i16:
11531 case NEON::BI__builtin_neon_vduph_laneq_i16:
11532 Ops[0] =
11533 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
11534 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11535 "vgetq_lane");
11536 case NEON::BI__builtin_neon_vget_lane_i32:
11537 case NEON::BI__builtin_neon_vdups_lane_i32:
11538 Ops[0] =
11539 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
11540 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11541 "vget_lane");
11542 case NEON::BI__builtin_neon_vdups_lane_f32:
11543 Ops[0] =
11544 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
11545 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11546 "vdups_lane");
11547 case NEON::BI__builtin_neon_vgetq_lane_i32:
11548 case NEON::BI__builtin_neon_vdups_laneq_i32:
11549 Ops[0] =
11550 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
11551 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11552 "vgetq_lane");
11553 case NEON::BI__builtin_neon_vget_lane_i64:
11554 case NEON::BI__builtin_neon_vdupd_lane_i64:
11555 Ops[0] =
11556 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
11557 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11558 "vget_lane");
11559 case NEON::BI__builtin_neon_vdupd_lane_f64:
11560 Ops[0] =
11561 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
11562 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11563 "vdupd_lane");
11564 case NEON::BI__builtin_neon_vgetq_lane_i64:
11565 case NEON::BI__builtin_neon_vdupd_laneq_i64:
11566 Ops[0] =
11567 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
11568 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11569 "vgetq_lane");
11570 case NEON::BI__builtin_neon_vget_lane_f32:
11571 Ops[0] =
11572 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
11573 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11574 "vget_lane");
11575 case NEON::BI__builtin_neon_vget_lane_f64:
11576 Ops[0] =
11577 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
11578 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11579 "vget_lane");
11580 case NEON::BI__builtin_neon_vgetq_lane_f32:
11581 case NEON::BI__builtin_neon_vdups_laneq_f32:
11582 Ops[0] =
11583 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
11584 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11585 "vgetq_lane");
11586 case NEON::BI__builtin_neon_vgetq_lane_f64:
11587 case NEON::BI__builtin_neon_vdupd_laneq_f64:
11588 Ops[0] =
11589 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
11590 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11591 "vgetq_lane");
11592 case NEON::BI__builtin_neon_vaddh_f16:
11593 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11594 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
11595 case NEON::BI__builtin_neon_vsubh_f16:
11596 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11597 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
11598 case NEON::BI__builtin_neon_vmulh_f16:
11599 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11600 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
11601 case NEON::BI__builtin_neon_vdivh_f16:
11602 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11603 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
11604 case NEON::BI__builtin_neon_vfmah_f16:
11605 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11606 return emitCallMaybeConstrainedFPBuiltin(
11607 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
11608 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
11609 case NEON::BI__builtin_neon_vfmsh_f16: {
11610 Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
11612 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11613 return emitCallMaybeConstrainedFPBuiltin(
11614 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
11615 {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
11617 case NEON::BI__builtin_neon_vaddd_s64:
11618 case NEON::BI__builtin_neon_vaddd_u64:
11619 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
11620 case NEON::BI__builtin_neon_vsubd_s64:
11621 case NEON::BI__builtin_neon_vsubd_u64:
11622 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
11623 case NEON::BI__builtin_neon_vqdmlalh_s16:
11624 case NEON::BI__builtin_neon_vqdmlslh_s16: {
11625 SmallVector<Value *, 2> ProductOps;
11626 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
11627 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
11628 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
11629 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
11630 ProductOps, "vqdmlXl");
11631 Constant *CI = ConstantInt::get(SizeTy, 0);
11632 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
11634 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
11635 ? Intrinsic::aarch64_neon_sqadd
11636 : Intrinsic::aarch64_neon_sqsub;
11637 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
11639 case NEON::BI__builtin_neon_vqshlud_n_s64: {
11640 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11641 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
11642 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
11643 Ops, "vqshlu_n");
11645 case NEON::BI__builtin_neon_vqshld_n_u64:
11646 case NEON::BI__builtin_neon_vqshld_n_s64: {
11647 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
11648 ? Intrinsic::aarch64_neon_uqshl
11649 : Intrinsic::aarch64_neon_sqshl;
11650 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11651 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
11652 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
11654 case NEON::BI__builtin_neon_vrshrd_n_u64:
11655 case NEON::BI__builtin_neon_vrshrd_n_s64: {
11656 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
11657 ? Intrinsic::aarch64_neon_urshl
11658 : Intrinsic::aarch64_neon_srshl;
11659 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11660 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
11661 Ops[1] = ConstantInt::get(Int64Ty, -SV);
11662 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
11664 case NEON::BI__builtin_neon_vrsrad_n_u64:
11665 case NEON::BI__builtin_neon_vrsrad_n_s64: {
11666 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
11667 ? Intrinsic::aarch64_neon_urshl
11668 : Intrinsic::aarch64_neon_srshl;
11669 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11670 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
11671 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
11672 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
11673 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
11675 case NEON::BI__builtin_neon_vshld_n_s64:
11676 case NEON::BI__builtin_neon_vshld_n_u64: {
11677 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11678 return Builder.CreateShl(
11679 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
11681 case NEON::BI__builtin_neon_vshrd_n_s64: {
11682 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11683 return Builder.CreateAShr(
11684 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
11685 Amt->getZExtValue())),
11686 "shrd_n");
11688 case NEON::BI__builtin_neon_vshrd_n_u64: {
11689 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11690 uint64_t ShiftAmt = Amt->getZExtValue();
11691 // Right-shifting an unsigned value by its size yields 0.
11692 if (ShiftAmt == 64)
11693 return ConstantInt::get(Int64Ty, 0);
11694 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
11695 "shrd_n");
11697 case NEON::BI__builtin_neon_vsrad_n_s64: {
11698 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
11699 Ops[1] = Builder.CreateAShr(
11700 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
11701 Amt->getZExtValue())),
11702 "shrd_n");
11703 return Builder.CreateAdd(Ops[0], Ops[1]);
11705 case NEON::BI__builtin_neon_vsrad_n_u64: {
11706 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
11707 uint64_t ShiftAmt = Amt->getZExtValue();
11708 // Right-shifting an unsigned value by its size yields 0.
11709 // As Op + 0 = Op, return Ops[0] directly.
11710 if (ShiftAmt == 64)
11711 return Ops[0];
11712 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
11713 "shrd_n");
11714 return Builder.CreateAdd(Ops[0], Ops[1]);
11716 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
11717 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
11718 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
11719 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
11720 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
11721 "lane");
11722 SmallVector<Value *, 2> ProductOps;
11723 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
11724 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
11725 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
11726 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
11727 ProductOps, "vqdmlXl");
11728 Constant *CI = ConstantInt::get(SizeTy, 0);
11729 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
11730 Ops.pop_back();
11732 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
11733 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
11734 ? Intrinsic::aarch64_neon_sqadd
11735 : Intrinsic::aarch64_neon_sqsub;
11736 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
11738 case NEON::BI__builtin_neon_vqdmlals_s32:
11739 case NEON::BI__builtin_neon_vqdmlsls_s32: {
11740 SmallVector<Value *, 2> ProductOps;
11741 ProductOps.push_back(Ops[1]);
11742 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
11743 Ops[1] =
11744 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
11745 ProductOps, "vqdmlXl");
11747 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
11748 ? Intrinsic::aarch64_neon_sqadd
11749 : Intrinsic::aarch64_neon_sqsub;
11750 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
11752 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
11753 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
11754 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
11755 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
11756 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
11757 "lane");
11758 SmallVector<Value *, 2> ProductOps;
11759 ProductOps.push_back(Ops[1]);
11760 ProductOps.push_back(Ops[2]);
11761 Ops[1] =
11762 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
11763 ProductOps, "vqdmlXl");
11764 Ops.pop_back();
11766 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
11767 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
11768 ? Intrinsic::aarch64_neon_sqadd
11769 : Intrinsic::aarch64_neon_sqsub;
11770 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
11772 case NEON::BI__builtin_neon_vget_lane_bf16:
11773 case NEON::BI__builtin_neon_vduph_lane_bf16:
11774 case NEON::BI__builtin_neon_vduph_lane_f16: {
11775 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11776 "vget_lane");
11778 case NEON::BI__builtin_neon_vgetq_lane_bf16:
11779 case NEON::BI__builtin_neon_vduph_laneq_bf16:
11780 case NEON::BI__builtin_neon_vduph_laneq_f16: {
11781 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11782 "vgetq_lane");
11785 case clang::AArch64::BI_InterlockedAdd: {
11786 Value *Arg0 = EmitScalarExpr(E->getArg(0));
11787 Value *Arg1 = EmitScalarExpr(E->getArg(1));
11788 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
11789 AtomicRMWInst::Add, Arg0, Arg1,
11790 llvm::AtomicOrdering::SequentiallyConsistent);
11791 return Builder.CreateAdd(RMWI, Arg1);
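  // Illustrative only: atomicrmw add returns the *old* value, so the extra add
  // of Arg1 is what makes _InterlockedAdd return the new value, matching MSVC
  // semantics.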
11795 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
11796 llvm::Type *Ty = VTy;
11797 if (!Ty)
11798 return nullptr;
11800 // Not all intrinsics handled by the common case work for AArch64 yet, so only
11801 // defer to common code if it's been added to our special map.
11802 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
11803 AArch64SIMDIntrinsicsProvenSorted);
11805 if (Builtin)
11806 return EmitCommonNeonBuiltinExpr(
11807 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
11808 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
11809 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
11811 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
11812 return V;
11814 unsigned Int;
11815 switch (BuiltinID) {
11816 default: return nullptr;
11817 case NEON::BI__builtin_neon_vbsl_v:
11818 case NEON::BI__builtin_neon_vbslq_v: {
11819 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
11820 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
11821 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
11822 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
11824 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
11825 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
11826 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
11827 return Builder.CreateBitCast(Ops[0], Ty);
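  // Illustrative only: the bitwise select is open-coded as
  //   (mask & a) | (~mask & b)
  // on the integer view of the vectors, where mask/a/b are the three operands.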
11829 case NEON::BI__builtin_neon_vfma_lane_v:
11830 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
11831 // The ARM builtins (and instructions) have the addend as the first
11832 // operand, but the 'fma' intrinsics have it last. Swap it around here.
11833 Value *Addend = Ops[0];
11834 Value *Multiplicand = Ops[1];
11835 Value *LaneSource = Ops[2];
11836 Ops[0] = Multiplicand;
11837 Ops[1] = LaneSource;
11838 Ops[2] = Addend;
11840 // Now adjust things to handle the lane access.
11841 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
11842 ? llvm::FixedVectorType::get(VTy->getElementType(),
11843 VTy->getNumElements() / 2)
11844 : VTy;
11845 llvm::Constant *cst = cast<Constant>(Ops[3]);
11846 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
11847 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
11848 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
11850 Ops.pop_back();
11851 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
11852 : Intrinsic::fma;
11853 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
11855 case NEON::BI__builtin_neon_vfma_laneq_v: {
11856 auto *VTy = cast<llvm::FixedVectorType>(Ty);
11857 // v1f64 fma should be mapped to Neon scalar f64 fma
11858 if (VTy && VTy->getElementType() == DoubleTy) {
11859 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11860 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
11861 llvm::FixedVectorType *VTy =
11862 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
11863 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
11864 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
11865 Value *Result;
11866 Result = emitCallMaybeConstrainedFPBuiltin(
11867 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
11868 DoubleTy, {Ops[1], Ops[2], Ops[0]});
11869 return Builder.CreateBitCast(Result, Ty);
11871 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11872 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11874 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
11875 VTy->getNumElements() * 2);
11876 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
11877 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
11878 cast<ConstantInt>(Ops[3]));
11879 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
11881 return emitCallMaybeConstrainedFPBuiltin(
11882 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11883 {Ops[2], Ops[1], Ops[0]});
11885 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
11886 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11887 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11889 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11890 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
11891 return emitCallMaybeConstrainedFPBuiltin(
11892 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11893 {Ops[2], Ops[1], Ops[0]});
11895 case NEON::BI__builtin_neon_vfmah_lane_f16:
11896 case NEON::BI__builtin_neon_vfmas_lane_f32:
11897 case NEON::BI__builtin_neon_vfmah_laneq_f16:
11898 case NEON::BI__builtin_neon_vfmas_laneq_f32:
11899 case NEON::BI__builtin_neon_vfmad_lane_f64:
11900 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
11901 Ops.push_back(EmitScalarExpr(E->getArg(3)));
11902 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
11903 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
11904 return emitCallMaybeConstrainedFPBuiltin(
11905 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11906 {Ops[1], Ops[2], Ops[0]});
11908 case NEON::BI__builtin_neon_vmull_v:
11909 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11910 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
11911 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
11912 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
11913 case NEON::BI__builtin_neon_vmax_v:
11914 case NEON::BI__builtin_neon_vmaxq_v:
11915 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11916 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
11917 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
11918 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
11919 case NEON::BI__builtin_neon_vmaxh_f16: {
11920 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11921 Int = Intrinsic::aarch64_neon_fmax;
11922 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
11924 case NEON::BI__builtin_neon_vmin_v:
11925 case NEON::BI__builtin_neon_vminq_v:
11926 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11927 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
11928 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
11929 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
11930 case NEON::BI__builtin_neon_vminh_f16: {
11931 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11932 Int = Intrinsic::aarch64_neon_fmin;
11933 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
11935 case NEON::BI__builtin_neon_vabd_v:
11936 case NEON::BI__builtin_neon_vabdq_v:
11937 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11938 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
11939 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
11940 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
11941 case NEON::BI__builtin_neon_vpadal_v:
11942 case NEON::BI__builtin_neon_vpadalq_v: {
11943 unsigned ArgElts = VTy->getNumElements();
11944 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
11945 unsigned BitWidth = EltTy->getBitWidth();
11946 auto *ArgTy = llvm::FixedVectorType::get(
11947 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
11948 llvm::Type* Tys[2] = { VTy, ArgTy };
11949 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
11950 SmallVector<llvm::Value*, 1> TmpOps;
11951 TmpOps.push_back(Ops[1]);
11952 Function *F = CGM.getIntrinsic(Int, Tys);
11953 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
11954 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
11955 return Builder.CreateAdd(tmp, addend);
11957 case NEON::BI__builtin_neon_vpmin_v:
11958 case NEON::BI__builtin_neon_vpminq_v:
11959 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11960 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
11961 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
11962 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
11963 case NEON::BI__builtin_neon_vpmax_v:
11964 case NEON::BI__builtin_neon_vpmaxq_v:
11965 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11966 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
11967 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
11968 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
11969 case NEON::BI__builtin_neon_vminnm_v:
11970 case NEON::BI__builtin_neon_vminnmq_v:
11971 Int = Intrinsic::aarch64_neon_fminnm;
11972 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
11973 case NEON::BI__builtin_neon_vminnmh_f16:
11974 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11975 Int = Intrinsic::aarch64_neon_fminnm;
11976 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
11977 case NEON::BI__builtin_neon_vmaxnm_v:
11978 case NEON::BI__builtin_neon_vmaxnmq_v:
11979 Int = Intrinsic::aarch64_neon_fmaxnm;
11980 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
11981 case NEON::BI__builtin_neon_vmaxnmh_f16:
11982 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11983 Int = Intrinsic::aarch64_neon_fmaxnm;
11984 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
11985 case NEON::BI__builtin_neon_vrecpss_f32: {
11986 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11987 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
11988 Ops, "vrecps");
11990 case NEON::BI__builtin_neon_vrecpsd_f64:
11991 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11992 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
11993 Ops, "vrecps");
11994 case NEON::BI__builtin_neon_vrecpsh_f16:
11995 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11996 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
11997 Ops, "vrecps");
11998 case NEON::BI__builtin_neon_vqshrun_n_v:
11999 Int = Intrinsic::aarch64_neon_sqshrun;
12000 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
12001 case NEON::BI__builtin_neon_vqrshrun_n_v:
12002 Int = Intrinsic::aarch64_neon_sqrshrun;
12003 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
12004 case NEON::BI__builtin_neon_vqshrn_n_v:
12005 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
12006 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
12007 case NEON::BI__builtin_neon_vrshrn_n_v:
12008 Int = Intrinsic::aarch64_neon_rshrn;
12009 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
12010 case NEON::BI__builtin_neon_vqrshrn_n_v:
12011 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
12012 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
12013 case NEON::BI__builtin_neon_vrndah_f16: {
12014 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12015 Int = Builder.getIsFPConstrained()
12016 ? Intrinsic::experimental_constrained_round
12017 : Intrinsic::round;
12018 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
12020 case NEON::BI__builtin_neon_vrnda_v:
12021 case NEON::BI__builtin_neon_vrndaq_v: {
12022 Int = Builder.getIsFPConstrained()
12023 ? Intrinsic::experimental_constrained_round
12024 : Intrinsic::round;
12025 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
12027 case NEON::BI__builtin_neon_vrndih_f16: {
12028 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12029 Int = Builder.getIsFPConstrained()
12030 ? Intrinsic::experimental_constrained_nearbyint
12031 : Intrinsic::nearbyint;
12032 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
12034 case NEON::BI__builtin_neon_vrndmh_f16: {
12035 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12036 Int = Builder.getIsFPConstrained()
12037 ? Intrinsic::experimental_constrained_floor
12038 : Intrinsic::floor;
12039 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
12041 case NEON::BI__builtin_neon_vrndm_v:
12042 case NEON::BI__builtin_neon_vrndmq_v: {
12043 Int = Builder.getIsFPConstrained()
12044 ? Intrinsic::experimental_constrained_floor
12045 : Intrinsic::floor;
12046 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
12048 case NEON::BI__builtin_neon_vrndnh_f16: {
12049 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12050 Int = Builder.getIsFPConstrained()
12051 ? Intrinsic::experimental_constrained_roundeven
12052 : Intrinsic::roundeven;
12053 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
12055 case NEON::BI__builtin_neon_vrndn_v:
12056 case NEON::BI__builtin_neon_vrndnq_v: {
12057 Int = Builder.getIsFPConstrained()
12058 ? Intrinsic::experimental_constrained_roundeven
12059 : Intrinsic::roundeven;
12060 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
12062 case NEON::BI__builtin_neon_vrndns_f32: {
12063 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12064 Int = Builder.getIsFPConstrained()
12065 ? Intrinsic::experimental_constrained_roundeven
12066 : Intrinsic::roundeven;
12067 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
12069 case NEON::BI__builtin_neon_vrndph_f16: {
12070 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12071 Int = Builder.getIsFPConstrained()
12072 ? Intrinsic::experimental_constrained_ceil
12073 : Intrinsic::ceil;
12074 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
12076 case NEON::BI__builtin_neon_vrndp_v:
12077 case NEON::BI__builtin_neon_vrndpq_v: {
12078 Int = Builder.getIsFPConstrained()
12079 ? Intrinsic::experimental_constrained_ceil
12080 : Intrinsic::ceil;
12081 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
12083 case NEON::BI__builtin_neon_vrndxh_f16: {
12084 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12085 Int = Builder.getIsFPConstrained()
12086 ? Intrinsic::experimental_constrained_rint
12087 : Intrinsic::rint;
12088 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
12090 case NEON::BI__builtin_neon_vrndx_v:
12091 case NEON::BI__builtin_neon_vrndxq_v: {
12092 Int = Builder.getIsFPConstrained()
12093 ? Intrinsic::experimental_constrained_rint
12094 : Intrinsic::rint;
12095 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
12097 case NEON::BI__builtin_neon_vrndh_f16: {
12098 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12099 Int = Builder.getIsFPConstrained()
12100 ? Intrinsic::experimental_constrained_trunc
12101 : Intrinsic::trunc;
12102 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
12104 case NEON::BI__builtin_neon_vrnd32x_f32:
12105 case NEON::BI__builtin_neon_vrnd32xq_f32:
12106 case NEON::BI__builtin_neon_vrnd32x_f64:
12107 case NEON::BI__builtin_neon_vrnd32xq_f64: {
12108 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12109 Int = Intrinsic::aarch64_neon_frint32x;
12110 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
12112 case NEON::BI__builtin_neon_vrnd32z_f32:
12113 case NEON::BI__builtin_neon_vrnd32zq_f32:
12114 case NEON::BI__builtin_neon_vrnd32z_f64:
12115 case NEON::BI__builtin_neon_vrnd32zq_f64: {
12116 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12117 Int = Intrinsic::aarch64_neon_frint32z;
12118 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
12120 case NEON::BI__builtin_neon_vrnd64x_f32:
12121 case NEON::BI__builtin_neon_vrnd64xq_f32:
12122 case NEON::BI__builtin_neon_vrnd64x_f64:
12123 case NEON::BI__builtin_neon_vrnd64xq_f64: {
12124 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12125 Int = Intrinsic::aarch64_neon_frint64x;
12126 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
12128 case NEON::BI__builtin_neon_vrnd64z_f32:
12129 case NEON::BI__builtin_neon_vrnd64zq_f32:
12130 case NEON::BI__builtin_neon_vrnd64z_f64:
12131 case NEON::BI__builtin_neon_vrnd64zq_f64: {
12132 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12133 Int = Intrinsic::aarch64_neon_frint64z;
12134 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
12136 case NEON::BI__builtin_neon_vrnd_v:
12137 case NEON::BI__builtin_neon_vrndq_v: {
12138 Int = Builder.getIsFPConstrained()
12139 ? Intrinsic::experimental_constrained_trunc
12140 : Intrinsic::trunc;
12141 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
12143 case NEON::BI__builtin_neon_vcvt_f64_v:
12144 case NEON::BI__builtin_neon_vcvtq_f64_v:
12145 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12146 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
12147 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
12148 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
12149 case NEON::BI__builtin_neon_vcvt_f64_f32: {
12150 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
12151 "unexpected vcvt_f64_f32 builtin");
12152 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
12153 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
12155 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
12157 case NEON::BI__builtin_neon_vcvt_f32_f64: {
12158 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
12159 "unexpected vcvt_f32_f64 builtin");
12160 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
12161 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
12163 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
12165 case NEON::BI__builtin_neon_vcvt_s32_v:
12166 case NEON::BI__builtin_neon_vcvt_u32_v:
12167 case NEON::BI__builtin_neon_vcvt_s64_v:
12168 case NEON::BI__builtin_neon_vcvt_u64_v:
12169 case NEON::BI__builtin_neon_vcvt_s16_f16:
12170 case NEON::BI__builtin_neon_vcvt_u16_f16:
12171 case NEON::BI__builtin_neon_vcvtq_s32_v:
12172 case NEON::BI__builtin_neon_vcvtq_u32_v:
12173 case NEON::BI__builtin_neon_vcvtq_s64_v:
12174 case NEON::BI__builtin_neon_vcvtq_u64_v:
12175 case NEON::BI__builtin_neon_vcvtq_s16_f16:
12176 case NEON::BI__builtin_neon_vcvtq_u16_f16: {
12177 Int =
12178 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
12179 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
12180 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
12182 case NEON::BI__builtin_neon_vcvta_s16_f16:
12183 case NEON::BI__builtin_neon_vcvta_u16_f16:
12184 case NEON::BI__builtin_neon_vcvta_s32_v:
12185 case NEON::BI__builtin_neon_vcvtaq_s16_f16:
12186 case NEON::BI__builtin_neon_vcvtaq_s32_v:
12187 case NEON::BI__builtin_neon_vcvta_u32_v:
12188 case NEON::BI__builtin_neon_vcvtaq_u16_f16:
12189 case NEON::BI__builtin_neon_vcvtaq_u32_v:
12190 case NEON::BI__builtin_neon_vcvta_s64_v:
12191 case NEON::BI__builtin_neon_vcvtaq_s64_v:
12192 case NEON::BI__builtin_neon_vcvta_u64_v:
12193 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
12194 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
12195 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
12196 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
12198 case NEON::BI__builtin_neon_vcvtm_s16_f16:
12199 case NEON::BI__builtin_neon_vcvtm_s32_v:
12200 case NEON::BI__builtin_neon_vcvtmq_s16_f16:
12201 case NEON::BI__builtin_neon_vcvtmq_s32_v:
12202 case NEON::BI__builtin_neon_vcvtm_u16_f16:
12203 case NEON::BI__builtin_neon_vcvtm_u32_v:
12204 case NEON::BI__builtin_neon_vcvtmq_u16_f16:
12205 case NEON::BI__builtin_neon_vcvtmq_u32_v:
12206 case NEON::BI__builtin_neon_vcvtm_s64_v:
12207 case NEON::BI__builtin_neon_vcvtmq_s64_v:
12208 case NEON::BI__builtin_neon_vcvtm_u64_v:
12209 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
12210 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
12211 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
12212 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
12214 case NEON::BI__builtin_neon_vcvtn_s16_f16:
12215 case NEON::BI__builtin_neon_vcvtn_s32_v:
12216 case NEON::BI__builtin_neon_vcvtnq_s16_f16:
12217 case NEON::BI__builtin_neon_vcvtnq_s32_v:
12218 case NEON::BI__builtin_neon_vcvtn_u16_f16:
12219 case NEON::BI__builtin_neon_vcvtn_u32_v:
12220 case NEON::BI__builtin_neon_vcvtnq_u16_f16:
12221 case NEON::BI__builtin_neon_vcvtnq_u32_v:
12222 case NEON::BI__builtin_neon_vcvtn_s64_v:
12223 case NEON::BI__builtin_neon_vcvtnq_s64_v:
12224 case NEON::BI__builtin_neon_vcvtn_u64_v:
12225 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
12226 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
12227 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
12228 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
12230 case NEON::BI__builtin_neon_vcvtp_s16_f16:
12231 case NEON::BI__builtin_neon_vcvtp_s32_v:
12232 case NEON::BI__builtin_neon_vcvtpq_s16_f16:
12233 case NEON::BI__builtin_neon_vcvtpq_s32_v:
12234 case NEON::BI__builtin_neon_vcvtp_u16_f16:
12235 case NEON::BI__builtin_neon_vcvtp_u32_v:
12236 case NEON::BI__builtin_neon_vcvtpq_u16_f16:
12237 case NEON::BI__builtin_neon_vcvtpq_u32_v:
12238 case NEON::BI__builtin_neon_vcvtp_s64_v:
12239 case NEON::BI__builtin_neon_vcvtpq_s64_v:
12240 case NEON::BI__builtin_neon_vcvtp_u64_v:
12241 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
12242 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
12243 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
12244 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
12246 case NEON::BI__builtin_neon_vmulx_v:
12247 case NEON::BI__builtin_neon_vmulxq_v: {
12248 Int = Intrinsic::aarch64_neon_fmulx;
12249 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
12251 case NEON::BI__builtin_neon_vmulxh_lane_f16:
12252 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
12253 // vmulx_lane should be mapped to Neon scalar mulx after
12254 // extracting the scalar element
12255 Ops.push_back(EmitScalarExpr(E->getArg(2)));
12256 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
12257 Ops.pop_back();
12258 Int = Intrinsic::aarch64_neon_fmulx;
12259 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
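// Sketch of the mapping described above: vmulxh_lane_f16(a, v, 3) becomes,
// roughly,
//   %e = extractelement <4 x half> %v, i32 3
//   %r = call half @llvm.aarch64.neon.fmulx.f16(half %a, half %e)
// i.e. the lane is extracted first and the scalar fmulx intrinsic is used.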
12261 case NEON::BI__builtin_neon_vmul_lane_v:
12262 case NEON::BI__builtin_neon_vmul_laneq_v: {
12263 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
12264 bool Quad = false;
12265 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
12266 Quad = true;
12267 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
12268 llvm::FixedVectorType *VTy =
12269 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
12270 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
12271 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
12272 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
12273 return Builder.CreateBitCast(Result, Ty);
12275 case NEON::BI__builtin_neon_vnegd_s64:
12276 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
12277 case NEON::BI__builtin_neon_vnegh_f16:
12278 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
12279 case NEON::BI__builtin_neon_vpmaxnm_v:
12280 case NEON::BI__builtin_neon_vpmaxnmq_v: {
12281 Int = Intrinsic::aarch64_neon_fmaxnmp;
12282 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
12284 case NEON::BI__builtin_neon_vpminnm_v:
12285 case NEON::BI__builtin_neon_vpminnmq_v: {
12286 Int = Intrinsic::aarch64_neon_fminnmp;
12287 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
12289 case NEON::BI__builtin_neon_vsqrth_f16: {
12290 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12291 Int = Builder.getIsFPConstrained()
12292 ? Intrinsic::experimental_constrained_sqrt
12293 : Intrinsic::sqrt;
12294 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
12296 case NEON::BI__builtin_neon_vsqrt_v:
12297 case NEON::BI__builtin_neon_vsqrtq_v: {
12298 Int = Builder.getIsFPConstrained()
12299 ? Intrinsic::experimental_constrained_sqrt
12300 : Intrinsic::sqrt;
12301 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12302 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
12304 case NEON::BI__builtin_neon_vrbit_v:
12305 case NEON::BI__builtin_neon_vrbitq_v: {
12306 Int = Intrinsic::bitreverse;
12307 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
12309 case NEON::BI__builtin_neon_vaddv_u8:
12310 // FIXME: These are handled by the AArch64 scalar code.
12311 usgn = true;
12312 [[fallthrough]];
12313 case NEON::BI__builtin_neon_vaddv_s8: {
12314 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
12315 Ty = Int32Ty;
12316 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12317 llvm::Type *Tys[2] = { Ty, VTy };
12318 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12319 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12320 return Builder.CreateTrunc(Ops[0], Int8Ty);
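// These across-vector add reductions all follow the same pattern: the
// (s|u)addv intrinsic is overloaded to return an i32, which is then
// truncated back to the element width. E.g. for vaddv_u8(x), roughly:
//   %s = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %x)
//   %r = trunc i32 %s to i8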
12322 case NEON::BI__builtin_neon_vaddv_u16:
12323 usgn = true;
12324 [[fallthrough]];
12325 case NEON::BI__builtin_neon_vaddv_s16: {
12326 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
12327 Ty = Int32Ty;
12328 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12329 llvm::Type *Tys[2] = { Ty, VTy };
12330 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12331 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12332 return Builder.CreateTrunc(Ops[0], Int16Ty);
12334 case NEON::BI__builtin_neon_vaddvq_u8:
12335 usgn = true;
12336 [[fallthrough]];
12337 case NEON::BI__builtin_neon_vaddvq_s8: {
12338 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
12339 Ty = Int32Ty;
12340 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12341 llvm::Type *Tys[2] = { Ty, VTy };
12342 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12343 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12344 return Builder.CreateTrunc(Ops[0], Int8Ty);
12346 case NEON::BI__builtin_neon_vaddvq_u16:
12347 usgn = true;
12348 [[fallthrough]];
12349 case NEON::BI__builtin_neon_vaddvq_s16: {
12350 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
12351 Ty = Int32Ty;
12352 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12353 llvm::Type *Tys[2] = { Ty, VTy };
12354 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12355 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
12356 return Builder.CreateTrunc(Ops[0], Int16Ty);
12358 case NEON::BI__builtin_neon_vmaxv_u8: {
12359 Int = Intrinsic::aarch64_neon_umaxv;
12360 Ty = Int32Ty;
12361 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12362 llvm::Type *Tys[2] = { Ty, VTy };
12363 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12364 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12365 return Builder.CreateTrunc(Ops[0], Int8Ty);
12367 case NEON::BI__builtin_neon_vmaxv_u16: {
12368 Int = Intrinsic::aarch64_neon_umaxv;
12369 Ty = Int32Ty;
12370 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12371 llvm::Type *Tys[2] = { Ty, VTy };
12372 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12373 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12374 return Builder.CreateTrunc(Ops[0], Int16Ty);
12376 case NEON::BI__builtin_neon_vmaxvq_u8: {
12377 Int = Intrinsic::aarch64_neon_umaxv;
12378 Ty = Int32Ty;
12379 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12380 llvm::Type *Tys[2] = { Ty, VTy };
12381 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12382 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12383 return Builder.CreateTrunc(Ops[0], Int8Ty);
12385 case NEON::BI__builtin_neon_vmaxvq_u16: {
12386 Int = Intrinsic::aarch64_neon_umaxv;
12387 Ty = Int32Ty;
12388 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12389 llvm::Type *Tys[2] = { Ty, VTy };
12390 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12391 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12392 return Builder.CreateTrunc(Ops[0], Int16Ty);
12394 case NEON::BI__builtin_neon_vmaxv_s8: {
12395 Int = Intrinsic::aarch64_neon_smaxv;
12396 Ty = Int32Ty;
12397 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12398 llvm::Type *Tys[2] = { Ty, VTy };
12399 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12400 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12401 return Builder.CreateTrunc(Ops[0], Int8Ty);
12403 case NEON::BI__builtin_neon_vmaxv_s16: {
12404 Int = Intrinsic::aarch64_neon_smaxv;
12405 Ty = Int32Ty;
12406 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12407 llvm::Type *Tys[2] = { Ty, VTy };
12408 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12409 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12410 return Builder.CreateTrunc(Ops[0], Int16Ty);
12412 case NEON::BI__builtin_neon_vmaxvq_s8: {
12413 Int = Intrinsic::aarch64_neon_smaxv;
12414 Ty = Int32Ty;
12415 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12416 llvm::Type *Tys[2] = { Ty, VTy };
12417 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12418 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12419 return Builder.CreateTrunc(Ops[0], Int8Ty);
12421 case NEON::BI__builtin_neon_vmaxvq_s16: {
12422 Int = Intrinsic::aarch64_neon_smaxv;
12423 Ty = Int32Ty;
12424 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12425 llvm::Type *Tys[2] = { Ty, VTy };
12426 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12427 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12428 return Builder.CreateTrunc(Ops[0], Int16Ty);
12430 case NEON::BI__builtin_neon_vmaxv_f16: {
12431 Int = Intrinsic::aarch64_neon_fmaxv;
12432 Ty = HalfTy;
12433 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12434 llvm::Type *Tys[2] = { Ty, VTy };
12435 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12436 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12437 return Builder.CreateTrunc(Ops[0], HalfTy);
12439 case NEON::BI__builtin_neon_vmaxvq_f16: {
12440 Int = Intrinsic::aarch64_neon_fmaxv;
12441 Ty = HalfTy;
12442 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12443 llvm::Type *Tys[2] = { Ty, VTy };
12444 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12445 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12446 return Builder.CreateTrunc(Ops[0], HalfTy);
12448 case NEON::BI__builtin_neon_vminv_u8: {
12449 Int = Intrinsic::aarch64_neon_uminv;
12450 Ty = Int32Ty;
12451 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12452 llvm::Type *Tys[2] = { Ty, VTy };
12453 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12454 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12455 return Builder.CreateTrunc(Ops[0], Int8Ty);
12457 case NEON::BI__builtin_neon_vminv_u16: {
12458 Int = Intrinsic::aarch64_neon_uminv;
12459 Ty = Int32Ty;
12460 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12461 llvm::Type *Tys[2] = { Ty, VTy };
12462 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12463 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12464 return Builder.CreateTrunc(Ops[0], Int16Ty);
12466 case NEON::BI__builtin_neon_vminvq_u8: {
12467 Int = Intrinsic::aarch64_neon_uminv;
12468 Ty = Int32Ty;
12469 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12470 llvm::Type *Tys[2] = { Ty, VTy };
12471 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12472 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12473 return Builder.CreateTrunc(Ops[0], Int8Ty);
12475 case NEON::BI__builtin_neon_vminvq_u16: {
12476 Int = Intrinsic::aarch64_neon_uminv;
12477 Ty = Int32Ty;
12478 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12479 llvm::Type *Tys[2] = { Ty, VTy };
12480 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12481 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12482 return Builder.CreateTrunc(Ops[0], Int16Ty);
12484 case NEON::BI__builtin_neon_vminv_s8: {
12485 Int = Intrinsic::aarch64_neon_sminv;
12486 Ty = Int32Ty;
12487 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12488 llvm::Type *Tys[2] = { Ty, VTy };
12489 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12490 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12491 return Builder.CreateTrunc(Ops[0], Int8Ty);
12493 case NEON::BI__builtin_neon_vminv_s16: {
12494 Int = Intrinsic::aarch64_neon_sminv;
12495 Ty = Int32Ty;
12496 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12497 llvm::Type *Tys[2] = { Ty, VTy };
12498 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12499 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12500 return Builder.CreateTrunc(Ops[0], Int16Ty);
12502 case NEON::BI__builtin_neon_vminvq_s8: {
12503 Int = Intrinsic::aarch64_neon_sminv;
12504 Ty = Int32Ty;
12505 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12506 llvm::Type *Tys[2] = { Ty, VTy };
12507 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12508 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12509 return Builder.CreateTrunc(Ops[0], Int8Ty);
12511 case NEON::BI__builtin_neon_vminvq_s16: {
12512 Int = Intrinsic::aarch64_neon_sminv;
12513 Ty = Int32Ty;
12514 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12515 llvm::Type *Tys[2] = { Ty, VTy };
12516 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12517 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12518 return Builder.CreateTrunc(Ops[0], Int16Ty);
12520 case NEON::BI__builtin_neon_vminv_f16: {
12521 Int = Intrinsic::aarch64_neon_fminv;
12522 Ty = HalfTy;
12523 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12524 llvm::Type *Tys[2] = { Ty, VTy };
12525 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12526 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12527 return Builder.CreateTrunc(Ops[0], HalfTy);
12529 case NEON::BI__builtin_neon_vminvq_f16: {
12530 Int = Intrinsic::aarch64_neon_fminv;
12531 Ty = HalfTy;
12532 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12533 llvm::Type *Tys[2] = { Ty, VTy };
12534 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12535 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12536 return Builder.CreateTrunc(Ops[0], HalfTy);
12538 case NEON::BI__builtin_neon_vmaxnmv_f16: {
12539 Int = Intrinsic::aarch64_neon_fmaxnmv;
12540 Ty = HalfTy;
12541 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12542 llvm::Type *Tys[2] = { Ty, VTy };
12543 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12544 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
12545 return Builder.CreateTrunc(Ops[0], HalfTy);
12547 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
12548 Int = Intrinsic::aarch64_neon_fmaxnmv;
12549 Ty = HalfTy;
12550 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12551 llvm::Type *Tys[2] = { Ty, VTy };
12552 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12553 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
12554 return Builder.CreateTrunc(Ops[0], HalfTy);
12556 case NEON::BI__builtin_neon_vminnmv_f16: {
12557 Int = Intrinsic::aarch64_neon_fminnmv;
12558 Ty = HalfTy;
12559 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12560 llvm::Type *Tys[2] = { Ty, VTy };
12561 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12562 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
12563 return Builder.CreateTrunc(Ops[0], HalfTy);
12565 case NEON::BI__builtin_neon_vminnmvq_f16: {
12566 Int = Intrinsic::aarch64_neon_fminnmv;
12567 Ty = HalfTy;
12568 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12569 llvm::Type *Tys[2] = { Ty, VTy };
12570 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12571 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
12572 return Builder.CreateTrunc(Ops[0], HalfTy);
12574 case NEON::BI__builtin_neon_vmul_n_f64: {
12575 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
12576 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
12577 return Builder.CreateFMul(Ops[0], RHS);
12579 case NEON::BI__builtin_neon_vaddlv_u8: {
12580 Int = Intrinsic::aarch64_neon_uaddlv;
12581 Ty = Int32Ty;
12582 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12583 llvm::Type *Tys[2] = { Ty, VTy };
12584 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12585 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12586 return Builder.CreateTrunc(Ops[0], Int16Ty);
12588 case NEON::BI__builtin_neon_vaddlv_u16: {
12589 Int = Intrinsic::aarch64_neon_uaddlv;
12590 Ty = Int32Ty;
12591 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12592 llvm::Type *Tys[2] = { Ty, VTy };
12593 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12594 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12596 case NEON::BI__builtin_neon_vaddlvq_u8: {
12597 Int = Intrinsic::aarch64_neon_uaddlv;
12598 Ty = Int32Ty;
12599 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12600 llvm::Type *Tys[2] = { Ty, VTy };
12601 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12602 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12603 return Builder.CreateTrunc(Ops[0], Int16Ty);
12605 case NEON::BI__builtin_neon_vaddlvq_u16: {
12606 Int = Intrinsic::aarch64_neon_uaddlv;
12607 Ty = Int32Ty;
12608 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12609 llvm::Type *Tys[2] = { Ty, VTy };
12610 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12611 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12613 case NEON::BI__builtin_neon_vaddlv_s8: {
12614 Int = Intrinsic::aarch64_neon_saddlv;
12615 Ty = Int32Ty;
12616 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12617 llvm::Type *Tys[2] = { Ty, VTy };
12618 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12619 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12620 return Builder.CreateTrunc(Ops[0], Int16Ty);
12622 case NEON::BI__builtin_neon_vaddlv_s16: {
12623 Int = Intrinsic::aarch64_neon_saddlv;
12624 Ty = Int32Ty;
12625 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12626 llvm::Type *Tys[2] = { Ty, VTy };
12627 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12628 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12630 case NEON::BI__builtin_neon_vaddlvq_s8: {
12631 Int = Intrinsic::aarch64_neon_saddlv;
12632 Ty = Int32Ty;
12633 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12634 llvm::Type *Tys[2] = { Ty, VTy };
12635 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12636 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12637 return Builder.CreateTrunc(Ops[0], Int16Ty);
12639 case NEON::BI__builtin_neon_vaddlvq_s16: {
12640 Int = Intrinsic::aarch64_neon_saddlv;
12641 Ty = Int32Ty;
12642 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12643 llvm::Type *Tys[2] = { Ty, VTy };
12644 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12645 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12647 case NEON::BI__builtin_neon_vsri_n_v:
12648 case NEON::BI__builtin_neon_vsriq_n_v: {
12649 Int = Intrinsic::aarch64_neon_vsri;
12650 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
12651 return EmitNeonCall(Intrin, Ops, "vsri_n");
12653 case NEON::BI__builtin_neon_vsli_n_v:
12654 case NEON::BI__builtin_neon_vsliq_n_v: {
12655 Int = Intrinsic::aarch64_neon_vsli;
12656 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
12657 return EmitNeonCall(Intrin, Ops, "vsli_n");
12659 case NEON::BI__builtin_neon_vsra_n_v:
12660 case NEON::BI__builtin_neon_vsraq_n_v:
12661 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12662 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
12663 return Builder.CreateAdd(Ops[0], Ops[1]);
12664 case NEON::BI__builtin_neon_vrsra_n_v:
12665 case NEON::BI__builtin_neon_vrsraq_n_v: {
12666 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
12667 SmallVector<llvm::Value*,2> TmpOps;
12668 TmpOps.push_back(Ops[1]);
12669 TmpOps.push_back(Ops[2]);
12670 Function* F = CGM.getIntrinsic(Int, Ty);
12671 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
12672 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
12673 return Builder.CreateAdd(Ops[0], tmp);
12675 case NEON::BI__builtin_neon_vld1_v:
12676 case NEON::BI__builtin_neon_vld1q_v: {
12677 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
12679 case NEON::BI__builtin_neon_vst1_v:
12680 case NEON::BI__builtin_neon_vst1q_v:
12681 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
12682 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
12683 case NEON::BI__builtin_neon_vld1_lane_v:
12684 case NEON::BI__builtin_neon_vld1q_lane_v: {
12685 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12686 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
12687 PtrOp0.getAlignment());
12688 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
12690 case NEON::BI__builtin_neon_vldap1_lane_s64:
12691 case NEON::BI__builtin_neon_vldap1q_lane_s64: {
12692 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12693 llvm::LoadInst *LI = Builder.CreateAlignedLoad(
12694 VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
12695 LI->setAtomic(llvm::AtomicOrdering::Acquire);
12696 Ops[0] = LI;
12697 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
12699 case NEON::BI__builtin_neon_vld1_dup_v:
12700 case NEON::BI__builtin_neon_vld1q_dup_v: {
12701 Value *V = PoisonValue::get(Ty);
12702 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
12703 PtrOp0.getAlignment());
12704 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
12705 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
12706 return EmitNeonSplat(Ops[0], CI);
12708 case NEON::BI__builtin_neon_vst1_lane_v:
12709 case NEON::BI__builtin_neon_vst1q_lane_v:
12710 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12711 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
12712 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
12713 case NEON::BI__builtin_neon_vstl1_lane_s64:
12714 case NEON::BI__builtin_neon_vstl1q_lane_s64: {
12715 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12716 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
12717 llvm::StoreInst *SI =
12718 Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
12719 SI->setAtomic(llvm::AtomicOrdering::Release);
12720 return SI;
12722 case NEON::BI__builtin_neon_vld2_v:
12723 case NEON::BI__builtin_neon_vld2q_v: {
12724 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12725 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
12726 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
12727 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
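// Sketch: the ld2 intrinsic yields a two-vector aggregate that is then
// stored through Ops[0] into the temporary for the NEON x2 result type.
// E.g. for vld2q_s32, approximately:
//   %vld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0(ptr %p)
//   store { <4 x i32>, <4 x i32> } %vld2, ptr %ret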
12729 case NEON::BI__builtin_neon_vld3_v:
12730 case NEON::BI__builtin_neon_vld3q_v: {
12731 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12732 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
12733 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
12734 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12736 case NEON::BI__builtin_neon_vld4_v:
12737 case NEON::BI__builtin_neon_vld4q_v: {
12738 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12739 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
12740 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
12741 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12743 case NEON::BI__builtin_neon_vld2_dup_v:
12744 case NEON::BI__builtin_neon_vld2q_dup_v: {
12745 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12746 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
12747 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
12748 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12750 case NEON::BI__builtin_neon_vld3_dup_v:
12751 case NEON::BI__builtin_neon_vld3q_dup_v: {
12752 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12753 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
12754 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
12755 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12757 case NEON::BI__builtin_neon_vld4_dup_v:
12758 case NEON::BI__builtin_neon_vld4q_dup_v: {
12759 llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
12760 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
12761 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
12762 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12764 case NEON::BI__builtin_neon_vld2_lane_v:
12765 case NEON::BI__builtin_neon_vld2q_lane_v: {
12766 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12767 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
12768 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12769 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12770 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12771 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
12772 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
12773 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12775 case NEON::BI__builtin_neon_vld3_lane_v:
12776 case NEON::BI__builtin_neon_vld3q_lane_v: {
12777 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12778 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
12779 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12780 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12781 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12782 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
12783 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
12784 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
12785 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12787 case NEON::BI__builtin_neon_vld4_lane_v:
12788 case NEON::BI__builtin_neon_vld4q_lane_v: {
12789 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12790 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
12791 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12792 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12793 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12794 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
12795 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
12796 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
12797 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
12798 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12800 case NEON::BI__builtin_neon_vst2_v:
12801 case NEON::BI__builtin_neon_vst2q_v: {
12802 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12803 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
12804 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
12805 Ops, "");
12807 case NEON::BI__builtin_neon_vst2_lane_v:
12808 case NEON::BI__builtin_neon_vst2q_lane_v: {
12809 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12810 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
12811 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
12812 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
12813 Ops, "");
12815 case NEON::BI__builtin_neon_vst3_v:
12816 case NEON::BI__builtin_neon_vst3q_v: {
12817 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12818 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
12819 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
12820 Ops, "");
12822 case NEON::BI__builtin_neon_vst3_lane_v:
12823 case NEON::BI__builtin_neon_vst3q_lane_v: {
12824 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12825 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
12826 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
12827 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
12828 Ops, "");
12830 case NEON::BI__builtin_neon_vst4_v:
12831 case NEON::BI__builtin_neon_vst4q_v: {
12832 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12833 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
12834 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
12835 Ops, "");
12837 case NEON::BI__builtin_neon_vst4_lane_v:
12838 case NEON::BI__builtin_neon_vst4q_lane_v: {
12839 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12840 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
12841 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
12842 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
12843 Ops, "");
12845 case NEON::BI__builtin_neon_vtrn_v:
12846 case NEON::BI__builtin_neon_vtrnq_v: {
12847 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12848 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12849 Value *SV = nullptr;
12851 for (unsigned vi = 0; vi != 2; ++vi) {
12852 SmallVector<int, 16> Indices;
12853 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
12854 Indices.push_back(i+vi);
12855 Indices.push_back(i+e+vi);
12857 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12858 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
12859 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12861 return SV;
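// For example, with a 4-element vector (e == 4) the two transpose shuffles
// use masks <0,4,2,6> and <1,5,3,7> (the trn1/trn2 patterns), and each
// shuffle result is stored to slot vi of the destination aggregate.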
12863 case NEON::BI__builtin_neon_vuzp_v:
12864 case NEON::BI__builtin_neon_vuzpq_v: {
12865 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12866 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12867 Value *SV = nullptr;
12869 for (unsigned vi = 0; vi != 2; ++vi) {
12870 SmallVector<int, 16> Indices;
12871 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
12872 Indices.push_back(2*i+vi);
12874 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12875 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
12876 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12878 return SV;
12880 case NEON::BI__builtin_neon_vzip_v:
12881 case NEON::BI__builtin_neon_vzipq_v: {
12882 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12883 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12884 Value *SV = nullptr;
12886 for (unsigned vi = 0; vi != 2; ++vi) {
12887 SmallVector<int, 16> Indices;
12888 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
12889 Indices.push_back((i + vi*e) >> 1);
12890 Indices.push_back(((i + vi*e) >> 1)+e);
12892 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12893 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
12894 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12896 return SV;
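// For example, with a 4-element vector the interleaving shuffles use masks
// <0,4,1,5> and <2,6,3,7> (the zip1/zip2 patterns), again stored to the two
// slots of the destination aggregate.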
12898 case NEON::BI__builtin_neon_vqtbl1q_v: {
12899 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
12900 Ops, "vtbl1");
12902 case NEON::BI__builtin_neon_vqtbl2q_v: {
12903 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
12904 Ops, "vtbl2");
12906 case NEON::BI__builtin_neon_vqtbl3q_v: {
12907 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
12908 Ops, "vtbl3");
12910 case NEON::BI__builtin_neon_vqtbl4q_v: {
12911 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
12912 Ops, "vtbl4");
12914 case NEON::BI__builtin_neon_vqtbx1q_v: {
12915 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
12916 Ops, "vtbx1");
12918 case NEON::BI__builtin_neon_vqtbx2q_v: {
12919 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
12920 Ops, "vtbx2");
12922 case NEON::BI__builtin_neon_vqtbx3q_v: {
12923 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
12924 Ops, "vtbx3");
12926 case NEON::BI__builtin_neon_vqtbx4q_v: {
12927 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
12928 Ops, "vtbx4");
12930 case NEON::BI__builtin_neon_vsqadd_v:
12931 case NEON::BI__builtin_neon_vsqaddq_v: {
12932 Int = Intrinsic::aarch64_neon_usqadd;
12933 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
12935 case NEON::BI__builtin_neon_vuqadd_v:
12936 case NEON::BI__builtin_neon_vuqaddq_v: {
12937 Int = Intrinsic::aarch64_neon_suqadd;
12938 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
12943 Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
12944 const CallExpr *E) {
12945 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
12946 BuiltinID == BPF::BI__builtin_btf_type_id ||
12947 BuiltinID == BPF::BI__builtin_preserve_type_info ||
12948 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
12949 "unexpected BPF builtin");
12951 // A sequence number, injected into IR builtin calls, to
12952 // prevent CSE, since the only difference between the calls
12953 // may just be the debuginfo metadata.
12954 static uint32_t BuiltinSeqNum;
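// For example, two otherwise identical uses are emitted with distinct first
// arguments, roughly:
//   call @llvm.bpf.btf.type.id(i32 0, i64 %flag), !preserve.access.index !1
//   call @llvm.bpf.btf.type.id(i32 1, i64 %flag), !preserve.access.index !2
// so they cannot be folded even though they differ only in attached metadata.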
12956 switch (BuiltinID) {
12957 default:
12958 llvm_unreachable("Unexpected BPF builtin");
12959 case BPF::BI__builtin_preserve_field_info: {
12960 const Expr *Arg = E->getArg(0);
12961 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
12963 if (!getDebugInfo()) {
12964 CGM.Error(E->getExprLoc(),
12965 "using __builtin_preserve_field_info() without -g");
12966 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
12967 : EmitLValue(Arg).getPointer(*this);
12970 // Enable underlying preserve_*_access_index() generation.
12971 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
12972 IsInPreservedAIRegion = true;
12973 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
12974 : EmitLValue(Arg).getPointer(*this);
12975 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
12977 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
12978 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
12980 // Build the IR for the preserve_field_info intrinsic.
12981 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
12982 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
12983 {FieldAddr->getType()});
12984 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
12986 case BPF::BI__builtin_btf_type_id:
12987 case BPF::BI__builtin_preserve_type_info: {
12988 if (!getDebugInfo()) {
12989 CGM.Error(E->getExprLoc(), "using builtin function without -g");
12990 return nullptr;
12993 const Expr *Arg0 = E->getArg(0);
12994 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
12995 Arg0->getType(), Arg0->getExprLoc());
12997 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
12998 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
12999 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
13001 llvm::Function *FnDecl;
13002 if (BuiltinID == BPF::BI__builtin_btf_type_id)
13003 FnDecl = llvm::Intrinsic::getDeclaration(
13004 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
13005 else
13006 FnDecl = llvm::Intrinsic::getDeclaration(
13007 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
13008 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
13009 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
13010 return Fn;
13012 case BPF::BI__builtin_preserve_enum_value: {
13013 if (!getDebugInfo()) {
13014 CGM.Error(E->getExprLoc(), "using builtin function without -g");
13015 return nullptr;
13018 const Expr *Arg0 = E->getArg(0);
13019 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
13020 Arg0->getType(), Arg0->getExprLoc());
13022 // Find enumerator
13023 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
13024 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
13025 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
13026 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
13028 auto &InitVal = Enumerator->getInitVal();
13029 std::string InitValStr;
13030 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
13031 InitValStr = std::to_string(InitVal.getSExtValue());
13032 else
13033 InitValStr = std::to_string(InitVal.getZExtValue());
13034 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
13035 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
13037 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
13038 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
13039 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
13041 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
13042 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
13043 CallInst *Fn =
13044 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
13045 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
13046 return Fn;
13051 llvm::Value *CodeGenFunction::
13052 BuildVector(ArrayRef<llvm::Value*> Ops) {
13053 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
13054 "Not a power-of-two sized vector!");
13055 bool AllConstants = true;
13056 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
13057 AllConstants &= isa<Constant>(Ops[i]);
13059 // If this is a constant vector, create a ConstantVector.
13060 if (AllConstants) {
13061 SmallVector<llvm::Constant*, 16> CstOps;
13062 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
13063 CstOps.push_back(cast<Constant>(Ops[i]));
13064 return llvm::ConstantVector::get(CstOps);
13067 // Otherwise, insertelement the values to build the vector.
13068 Value *Result = llvm::PoisonValue::get(
13069 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
13071 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
13072 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt64(i));
13074 return Result;
13077 // Convert the mask from an integer type to a vector of i1.
13078 static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
13079 unsigned NumElts) {
13081 auto *MaskTy = llvm::FixedVectorType::get(
13082 CGF.Builder.getInt1Ty(),
13083 cast<IntegerType>(Mask->getType())->getBitWidth());
13084 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
13086 // If we have fewer than 8 elements, then the starting mask was an i8 and
13087 // we need to extract down to the right number of elements.
13088 if (NumElts < 8) {
13089 int Indices[4];
13090 for (unsigned i = 0; i != NumElts; ++i)
13091 Indices[i] = i;
13092 MaskVec = CGF.Builder.CreateShuffleVector(
13093 MaskVec, MaskVec, ArrayRef(Indices, NumElts), "extract");
13095 return MaskVec;
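// Sketch: for an i8 mask and NumElts == 4 this emits, roughly,
//   %v = bitcast i8 %mask to <8 x i1>
//   %m = shufflevector <8 x i1> %v, <8 x i1> %v, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// i.e. the mask is reinterpreted lane-by-lane and then narrowed to the
// requested number of elements.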
13098 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13099 Align Alignment) {
13100 Value *Ptr = Ops[0];
13102 Value *MaskVec = getMaskVecValue(
13103 CGF, Ops[2],
13104 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
13106 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
13109 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13110 Align Alignment) {
13111 llvm::Type *Ty = Ops[1]->getType();
13112 Value *Ptr = Ops[0];
13114 Value *MaskVec = getMaskVecValue(
13115 CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
13117 return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
13120 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
13121 ArrayRef<Value *> Ops) {
13122 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
13123 Value *Ptr = Ops[0];
13125 Value *MaskVec = getMaskVecValue(
13126 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
13128 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
13129 ResultTy);
13130 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
13133 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
13134 ArrayRef<Value *> Ops,
13135 bool IsCompress) {
13136 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
13138 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
13140 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
13141 : Intrinsic::x86_avx512_mask_expand;
13142 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
13143 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
13146 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
13147 ArrayRef<Value *> Ops) {
13148 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
13149 Value *Ptr = Ops[0];
13151 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
13153 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
13154 ResultTy);
13155 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
13158 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
13159 ArrayRef<Value *> Ops,
13160 bool InvertLHS = false) {
13161 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13162 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
13163 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
13165 if (InvertLHS)
13166 LHS = CGF.Builder.CreateNot(LHS);
13168 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
13169 Ops[0]->getType());
13172 static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
13173 Value *Amt, bool IsRight) {
13174 llvm::Type *Ty = Op0->getType();
13176 // The amount may be a scalar immediate, in which case we create a splat vector.
13177 // Funnel shift amounts are treated as modulo, and the types are all power-of-2, so
13178 // we only care about the lowest log2(bitwidth) bits anyway.
13179 if (Amt->getType() != Ty) {
13180 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
13181 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
13182 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
13185 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
13186 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
13187 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
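// Sketch: a v4i32 left funnel shift by a scalar amount n becomes, roughly,
//   %amt = splat of (zext/trunc n) across <4 x i32>
//   %r   = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %amt)
// where each lane yields the top 32 bits of ((a:b) << (n % 32)).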
13190 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
13191 bool IsSigned) {
13192 Value *Op0 = Ops[0];
13193 Value *Op1 = Ops[1];
13194 llvm::Type *Ty = Op0->getType();
13195 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13197 CmpInst::Predicate Pred;
13198 switch (Imm) {
13199 case 0x0:
13200 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
13201 break;
13202 case 0x1:
13203 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
13204 break;
13205 case 0x2:
13206 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
13207 break;
13208 case 0x3:
13209 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
13210 break;
13211 case 0x4:
13212 Pred = ICmpInst::ICMP_EQ;
13213 break;
13214 case 0x5:
13215 Pred = ICmpInst::ICMP_NE;
13216 break;
13217 case 0x6:
13218 return llvm::Constant::getNullValue(Ty); // FALSE
13219 case 0x7:
13220 return llvm::Constant::getAllOnesValue(Ty); // TRUE
13221 default:
13222 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
13225 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
13226 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
13227 return Res;
13230 static Value *EmitX86Select(CodeGenFunction &CGF,
13231 Value *Mask, Value *Op0, Value *Op1) {
13233 // If the mask is all ones, just return the first argument.
13234 if (const auto *C = dyn_cast<Constant>(Mask))
13235 if (C->isAllOnesValue())
13236 return Op0;
13238 Mask = getMaskVecValue(
13239 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
13241 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
13244 static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
13245 Value *Mask, Value *Op0, Value *Op1) {
13246 // If the mask is all ones, just return the first argument.
13247 if (const auto *C = dyn_cast<Constant>(Mask))
13248 if (C->isAllOnesValue())
13249 return Op0;
13251 auto *MaskTy = llvm::FixedVectorType::get(
13252 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
13253 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
13254 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
13255 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
13258 static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
13259 unsigned NumElts, Value *MaskIn) {
13260 if (MaskIn) {
13261 const auto *C = dyn_cast<Constant>(MaskIn);
13262 if (!C || !C->isAllOnesValue())
13263 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
13266 if (NumElts < 8) {
13267 int Indices[8];
13268 for (unsigned i = 0; i != NumElts; ++i)
13269 Indices[i] = i;
13270 for (unsigned i = NumElts; i != 8; ++i)
13271 Indices[i] = i % NumElts + NumElts;
13272 Cmp = CGF.Builder.CreateShuffleVector(
13273 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
13276 return CGF.Builder.CreateBitCast(Cmp,
13277 IntegerType::get(CGF.getLLVMContext(),
13278 std::max(NumElts, 8U)));
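// For example, with NumElts == 4 the <4 x i1> compare result is widened with
// the shuffle mask <0,1,2,3,4,5,6,7>, where lanes 4-7 come from the all-zero
// vector, so the high bits of the resulting i8 are zero.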
13281 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
13282 bool Signed, ArrayRef<Value *> Ops) {
13283 assert((Ops.size() == 2 || Ops.size() == 4) &&
13284 "Unexpected number of arguments");
13285 unsigned NumElts =
13286 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13287 Value *Cmp;
13289 if (CC == 3) {
13290 Cmp = Constant::getNullValue(
13291 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
13292 } else if (CC == 7) {
13293 Cmp = Constant::getAllOnesValue(
13294 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
13295 } else {
13296 ICmpInst::Predicate Pred;
13297 switch (CC) {
13298 default: llvm_unreachable("Unknown condition code");
13299 case 0: Pred = ICmpInst::ICMP_EQ; break;
13300 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
13301 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
13302 case 4: Pred = ICmpInst::ICMP_NE; break;
13303 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
13304 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
13306 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
13309 Value *MaskIn = nullptr;
13310 if (Ops.size() == 4)
13311 MaskIn = Ops[3];
13313 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
13316 static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
13317 Value *Zero = Constant::getNullValue(In->getType());
13318 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
13321 static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
13322 ArrayRef<Value *> Ops, bool IsSigned) {
13323 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
13324 llvm::Type *Ty = Ops[1]->getType();
13326 Value *Res;
13327 if (Rnd != 4) {
13328 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
13329 : Intrinsic::x86_avx512_uitofp_round;
13330 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
13331 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
13332 } else {
13333 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
13334 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
13335 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
13338 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
13341 // Lowers X86 FMA intrinsics to IR.
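// Sketch: e.g. __builtin_ia32_vfmsubps512_mask3 with a rounding operand of
// _MM_FROUND_CUR_DIRECTION (4) is emitted, roughly, as an fneg of the third
// operand followed by @llvm.fma.v16f32 and a mask select; any other rounding
// value routes through the target-specific @llvm.x86.avx512.vfmadd.ps.512,
// which carries the rounding operand.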
13342 static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
13343 ArrayRef<Value *> Ops, unsigned BuiltinID,
13344 bool IsAddSub) {
13346 bool Subtract = false;
13347 Intrinsic::ID IID = Intrinsic::not_intrinsic;
13348 switch (BuiltinID) {
13349 default: break;
13350 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
13351 Subtract = true;
13352 [[fallthrough]];
13353 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
13354 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
13355 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
13356 IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
13357 break;
13358 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
13359 Subtract = true;
13360 [[fallthrough]];
13361 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
13362 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
13363 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
13364 IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
13365 break;
13366 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
13367 Subtract = true;
13368 [[fallthrough]];
13369 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
13370 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
13371 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
13372 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
13373 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
13374 Subtract = true;
13375 [[fallthrough]];
13376 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
13377 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
13378 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
13379 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
13380 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
13381 Subtract = true;
13382 [[fallthrough]];
13383 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
13384 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
13385 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
13386 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
13387 break;
13388 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
13389 Subtract = true;
13390 [[fallthrough]];
13391 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
13392 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
13393 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
13394 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
13395 break;
13398 Value *A = Ops[0];
13399 Value *B = Ops[1];
13400 Value *C = Ops[2];
13402 if (Subtract)
13403 C = CGF.Builder.CreateFNeg(C);
13405 Value *Res;
13407 // Use the target-specific intrinsic when a rounding mode other than _MM_FROUND_CUR_DIRECTION/4 is requested, or for the addsub forms; with default rounding, fall through to the generic fma below.
13408 if (IID != Intrinsic::not_intrinsic &&
13409 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
13410 IsAddSub)) {
13411 Function *Intr = CGF.CGM.getIntrinsic(IID);
13412 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
13413 } else {
13414 llvm::Type *Ty = A->getType();
13415 Function *FMA;
13416 if (CGF.Builder.getIsFPConstrained()) {
13417 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
13418 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
13419 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
13420 } else {
13421 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
13422 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
13423 }
13424 }
13426 // Handle any required masking.
13427 Value *MaskFalseVal = nullptr;
13428 switch (BuiltinID) {
13429 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
13430 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
13431 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
13432 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
13433 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
13434 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
13435 MaskFalseVal = Ops[0];
13436 break;
13437 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
13438 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
13439 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
13440 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
13441 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
13442 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
13443 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
13444 break;
13445 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
13446 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
13447 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
13448 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
13449 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
13450 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
13451 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
13452 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
13453 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
13454 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
13455 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
13456 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
13457 MaskFalseVal = Ops[2];
13458 break;
13461 if (MaskFalseVal)
13462 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
13464 return Res;
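// Summary of the masking scheme above: the _mask forms blend the FMA result
// with the first source operand (Ops[0]), the _maskz forms blend with zero,
// and the _mask3 forms blend with the accumulator operand (Ops[2]); in all
// three cases Ops[3] supplies the write mask.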
13467 static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
13468 MutableArrayRef<Value *> Ops, Value *Upper,
13469 bool ZeroMask = false, unsigned PTIdx = 0,
13470 bool NegAcc = false) {
13471 unsigned Rnd = 4;
13472 if (Ops.size() > 4)
13473 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13475 if (NegAcc)
13476 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
13478 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13479 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13480 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13481 Value *Res;
13482 if (Rnd != 4) {
13483 Intrinsic::ID IID;
13485 switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
13486 case 16:
13487 IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
13488 break;
13489 case 32:
13490 IID = Intrinsic::x86_avx512_vfmadd_f32;
13491 break;
13492 case 64:
13493 IID = Intrinsic::x86_avx512_vfmadd_f64;
13494 break;
13495 default:
13496 llvm_unreachable("Unexpected size");
13497 }
13498 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
13499 {Ops[0], Ops[1], Ops[2], Ops[4]});
13500 } else if (CGF.Builder.getIsFPConstrained()) {
13501 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
13502 Function *FMA = CGF.CGM.getIntrinsic(
13503 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
13504 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
13505 } else {
13506 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
13507 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
13509 // If we have more than 3 arguments, we need to do masking.
13510 if (Ops.size() > 3) {
13511 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
13512 : Ops[PTIdx];
13514 // If we negated the accumulator and it is also the PassThru value, we need
13515 // to bypass the negate. Conveniently, Upper should be the same thing in this
13516 // case.
13517 if (NegAcc && PTIdx == 2)
13518 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
13520 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
13522 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
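// The scalar pattern above, informally: extract element 0 of each source,
// run fma (or the rounding intrinsic when Rnd != 4), optionally select
// against the pass-through under the scalar mask, and insert the result back
// into element 0 of Upper so the upper elements pass through unchanged.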
13525 static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
13526 ArrayRef<Value *> Ops) {
13527 llvm::Type *Ty = Ops[0]->getType();
13528 // Arguments have a vXi32 type so cast to vXi64.
13529 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
13530 Ty->getPrimitiveSizeInBits() / 64);
13531 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
13532 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
13534 if (IsSigned) {
13535 // Shift left then arithmetic shift right.
13536 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
13537 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
13538 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
13539 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
13540 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
13541 } else {
13542 // Clear the upper bits.
13543 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
13544 LHS = CGF.Builder.CreateAnd(LHS, Mask);
13545 RHS = CGF.Builder.CreateAnd(RHS, Mask);
13548 return CGF.Builder.CreateMul(LHS, RHS);
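// A rough sketch of the unsigned (pmuludq-style) lowering for 128-bit
// operands (names illustrative, not the exact emitted IR):
//   %a  = bitcast <4 x i32> %x to <2 x i64>
//   %b  = bitcast <4 x i32> %y to <2 x i64>
//   %al = and <2 x i64> %a, <i64 4294967295, i64 4294967295>  ; keep low 32 bits
//   %bl = and <2 x i64> %b, <i64 4294967295, i64 4294967295>
//   %r  = mul <2 x i64> %al, %bl
// The signed case instead sign-extends the low halves with shl/ashr by 32.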
13551 // Emit a masked pternlog intrinsic. This only exists because the header has to
13552 // use a macro and we aren't able to pass the input argument to a pternlog
13553 // builtin and a select builtin without evaluating it twice.
13554 static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
13555 ArrayRef<Value *> Ops) {
13556 llvm::Type *Ty = Ops[0]->getType();
13558 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
13559 unsigned EltWidth = Ty->getScalarSizeInBits();
13560 Intrinsic::ID IID;
13561 if (VecWidth == 128 && EltWidth == 32)
13562 IID = Intrinsic::x86_avx512_pternlog_d_128;
13563 else if (VecWidth == 256 && EltWidth == 32)
13564 IID = Intrinsic::x86_avx512_pternlog_d_256;
13565 else if (VecWidth == 512 && EltWidth == 32)
13566 IID = Intrinsic::x86_avx512_pternlog_d_512;
13567 else if (VecWidth == 128 && EltWidth == 64)
13568 IID = Intrinsic::x86_avx512_pternlog_q_128;
13569 else if (VecWidth == 256 && EltWidth == 64)
13570 IID = Intrinsic::x86_avx512_pternlog_q_256;
13571 else if (VecWidth == 512 && EltWidth == 64)
13572 IID = Intrinsic::x86_avx512_pternlog_q_512;
13573 else
13574 llvm_unreachable("Unexpected intrinsic");
13576 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
13577 Ops.drop_back());
13578 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
13579 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
13582 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
13583 llvm::Type *DstTy) {
13584 unsigned NumberOfElements =
13585 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
13586 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
13587 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
13590 Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
13591 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
13592 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
13593 return EmitX86CpuIs(CPUStr);
13596 // Convert F16 halves to floats.
13597 static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
13598 ArrayRef<Value *> Ops,
13599 llvm::Type *DstTy) {
13600 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
13601 "Unknown cvtph2ps intrinsic");
13603 // If the SAE intrinsic doesn't use default rounding, we can't lower it to a generic fpext; call the masked intrinsic directly.
13604 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
13605 Function *F =
13606 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
13607 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
13610 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
13611 Value *Src = Ops[0];
13613 // Extract the subvector.
13614 if (NumDstElts !=
13615 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
13616 assert(NumDstElts == 4 && "Unexpected vector size");
13617 Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
13620 // Bitcast from vXi16 to vXf16.
13621 auto *HalfTy = llvm::FixedVectorType::get(
13622 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
13623 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
13625 // Perform the fp-extension.
13626 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
13628 if (Ops.size() >= 3)
13629 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
13630 return Res;
13633 Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
13635 llvm::Type *Int32Ty = Builder.getInt32Ty();
13637 // Matching the struct layout from the compiler-rt/libgcc structure that is
13638 // filled in:
13639 // unsigned int __cpu_vendor;
13640 // unsigned int __cpu_type;
13641 // unsigned int __cpu_subtype;
13642 // unsigned int __cpu_features[1];
13643 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
13644 llvm::ArrayType::get(Int32Ty, 1));
13646 // Grab the global __cpu_model.
13647 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
13648 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
13650 // Calculate the index needed to access the correct field based on the
13651 // range. Also adjust the expected value.
13652 unsigned Index;
13653 unsigned Value;
13654 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
13655 #define X86_VENDOR(ENUM, STRING) \
13656 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
13657 #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
13658 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
13659 #define X86_CPU_TYPE(ENUM, STR) \
13660 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
13661 #define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS) \
13662 .Case(ALIAS, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
13663 #define X86_CPU_SUBTYPE(ENUM, STR) \
13664 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
13665 #include "llvm/TargetParser/X86TargetParser.def"
13666 .Default({0, 0});
13667 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
13669 // Grab the appropriate field from __cpu_model.
13670 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
13671 ConstantInt::get(Int32Ty, Index)};
13672 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
13673 CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
13674 CharUnits::fromQuantity(4));
13676 // Check the value of the field against the requested value.
13677 return Builder.CreateICmpEQ(CpuValue,
13678 llvm::ConstantInt::get(Int32Ty, Value));
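// For reference, the check emitted for e.g. __builtin_cpu_is("amd") is
// roughly (the field index and expected constant depend on the string):
//   %fld = getelementptr { i32, i32, i32, [1 x i32] }, ptr @__cpu_model,
//          i32 0, i32 <Index>
//   %val = load i32, ptr %fld, align 4
//   %res = icmp eq i32 %val, <Value>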
13681 Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
13682 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
13683 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
13684 return EmitX86CpuSupports(FeatureStr);
13687 Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
13688 return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
13691 llvm::Value *
13692 CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
13693 Value *Result = Builder.getTrue();
13694 if (FeatureMask[0] != 0) {
13695 // Matching the struct layout from the compiler-rt/libgcc structure that is
13696 // filled in:
13697 // unsigned int __cpu_vendor;
13698 // unsigned int __cpu_type;
13699 // unsigned int __cpu_subtype;
13700 // unsigned int __cpu_features[1];
13701 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
13702 llvm::ArrayType::get(Int32Ty, 1));
13704 // Grab the global __cpu_model.
13705 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
13706 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
13708 // Grab the first (0th) element from the field __cpu_features off of the
13709 // global in the struct STy.
13710 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
13711 Builder.getInt32(0)};
13712 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
13713 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
13714 CharUnits::fromQuantity(4));
13716 // Check the value of the bit corresponding to the feature requested.
13717 Value *Mask = Builder.getInt32(FeatureMask[0]);
13718 Value *Bitset = Builder.CreateAnd(Features, Mask);
13719 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13720 Result = Builder.CreateAnd(Result, Cmp);
13723 llvm::Type *ATy = llvm::ArrayType::get(Int32Ty, 3);
13724 llvm::Constant *CpuFeatures2 =
13725 CGM.CreateRuntimeVariable(ATy, "__cpu_features2");
13726 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
13727 for (int i = 1; i != 4; ++i) {
13728 const uint32_t M = FeatureMask[i];
13729 if (!M)
13730 continue;
13731 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)};
13732 Value *Features = Builder.CreateAlignedLoad(
13733 Int32Ty, Builder.CreateGEP(ATy, CpuFeatures2, Idxs),
13734 CharUnits::fromQuantity(4));
13735 // Check the value of the bit corresponding to the feature requested.
13736 Value *Mask = Builder.getInt32(M);
13737 Value *Bitset = Builder.CreateAnd(Features, Mask);
13738 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13739 Result = Builder.CreateAnd(Result, Cmp);
13742 return Result;
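// Informal summary: the first 32 feature bits live in
// __cpu_model.__cpu_features[0]; the remaining three 32-bit words come from
// the separate __cpu_features2 array. Each requested word is loaded, ANDed
// with its mask, and compared for equality with that mask, so the result is
// true only if every requested feature bit is set.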
13745 Value *CodeGenFunction::EmitAArch64CpuInit() {
13746 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
13747 llvm::FunctionCallee Func =
13748 CGM.CreateRuntimeFunction(FTy, "__init_cpu_features_resolver");
13749 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
13750 cast<llvm::GlobalValue>(Func.getCallee())
13751 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
13752 return Builder.CreateCall(Func);
13755 Value *CodeGenFunction::EmitX86CpuInit() {
13756 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
13757 /*Variadic*/ false);
13758 llvm::FunctionCallee Func =
13759 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
13760 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
13761 cast<llvm::GlobalValue>(Func.getCallee())
13762 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
13763 return Builder.CreateCall(Func);
13766 llvm::Value *
13767 CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
13768 uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
13769 Value *Result = Builder.getTrue();
13770 if (FeaturesMask != 0) {
13771 // Get the features from the structure provided by the runtime library:
13772 // struct {
13773 // unsigned long long features;
13774 // } __aarch64_cpu_features;
13775 llvm::Type *STy = llvm::StructType::get(Int64Ty);
13776 llvm::Constant *AArch64CPUFeatures =
13777 CGM.CreateRuntimeVariable(STy, "__aarch64_cpu_features");
13778 cast<llvm::GlobalValue>(AArch64CPUFeatures)->setDSOLocal(true);
13779 llvm::Value *CpuFeatures = Builder.CreateGEP(
13780 STy, AArch64CPUFeatures,
13781 {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 0)});
13782 Value *Features = Builder.CreateAlignedLoad(Int64Ty, CpuFeatures,
13783 CharUnits::fromQuantity(8));
13784 Value *Mask = Builder.getInt64(FeaturesMask);
13785 Value *Bitset = Builder.CreateAnd(Features, Mask);
13786 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13787 Result = Builder.CreateAnd(Result, Cmp);
13789 return Result;
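// Same idea as the x86 feature check, but with a single 64-bit bitfield:
// load __aarch64_cpu_features.features, AND it with the mask derived from
// the feature strings, and require the result to equal the mask.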
13792 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
13793 const CallExpr *E) {
13794 if (BuiltinID == X86::BI__builtin_cpu_is)
13795 return EmitX86CpuIs(E);
13796 if (BuiltinID == X86::BI__builtin_cpu_supports)
13797 return EmitX86CpuSupports(E);
13798 if (BuiltinID == X86::BI__builtin_cpu_init)
13799 return EmitX86CpuInit();
13801 // Handle MSVC intrinsics before argument evaluation to prevent double
13802 // evaluation.
13803 if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
13804 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
13806 SmallVector<Value*, 4> Ops;
13807 bool IsMaskFCmp = false;
13808 bool IsConjFMA = false;
13810 // Find out if any arguments are required to be integer constant expressions.
13811 unsigned ICEArguments = 0;
13812 ASTContext::GetBuiltinTypeError Error;
13813 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
13814 assert(Error == ASTContext::GE_None && "Should not codegen an error");
13816 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
13817 // If this is a normal argument, just emit it as a scalar.
13818 if ((ICEArguments & (1 << i)) == 0) {
13819 Ops.push_back(EmitScalarExpr(E->getArg(i)));
13820 continue;
13823 // If this is required to be a constant, constant fold it so that we know
13824 // that the generated intrinsic gets a ConstantInt.
13825 Ops.push_back(llvm::ConstantInt::get(
13826 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
13829 // These exist so that the builtin that takes an immediate can be bounds
13830 // checked by clang to avoid passing bad immediates to the backend. Since
13831 // AVX has a larger immediate than SSE we would need separate builtins to
13832 // do the different bounds checking. Rather than create a clang-specific
13833 // SSE-only builtin, this implements eight separate builtins to match the
13834 // gcc implementation.
13835 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
13836 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
13837 llvm::Function *F = CGM.getIntrinsic(ID);
13838 return Builder.CreateCall(F, Ops);
13841 // For the vector forms of FP comparisons, translate the builtins directly to
13842 // IR.
13843 // TODO: The builtins could be removed if the SSE header files used vector
13844 // extension comparisons directly (vector ordered/unordered may need
13845 // additional support via __builtin_isnan()).
13846 auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
13847 bool IsSignaling) {
13848 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
13849 Value *Cmp;
13850 if (IsSignaling)
13851 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
13852 else
13853 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
13854 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
13855 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
13856 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
13857 return Builder.CreateBitCast(Sext, FPVecTy);
13860 switch (BuiltinID) {
13861 default: return nullptr;
13862 case X86::BI_mm_prefetch: {
13863 Value *Address = Ops[0];
13864 ConstantInt *C = cast<ConstantInt>(Ops[1]);
13865 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
13866 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
13867 Value *Data = ConstantInt::get(Int32Ty, 1);
13868 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
13869 return Builder.CreateCall(F, {Address, RW, Locality, Data});
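// The hint immediate is decomposed above: bit 2 selects a read (0) vs.
// write (1) prefetch and the low two bits give the locality level, so a hint
// value of 3, for example, becomes
// llvm.prefetch(addr, /*rw=*/0, /*locality=*/3, /*cache type=*/1).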
13871 case X86::BI_mm_clflush: {
13872 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
13873 Ops[0]);
13875 case X86::BI_mm_lfence: {
13876 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
13878 case X86::BI_mm_mfence: {
13879 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
13881 case X86::BI_mm_sfence: {
13882 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
13884 case X86::BI_mm_pause: {
13885 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
13887 case X86::BI__rdtsc: {
13888 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
13890 case X86::BI__builtin_ia32_rdtscp: {
13891 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
13892 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13893 Ops[0]);
13894 return Builder.CreateExtractValue(Call, 0);
13896 case X86::BI__builtin_ia32_lzcnt_u16:
13897 case X86::BI__builtin_ia32_lzcnt_u32:
13898 case X86::BI__builtin_ia32_lzcnt_u64: {
13899 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13900 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13902 case X86::BI__builtin_ia32_tzcnt_u16:
13903 case X86::BI__builtin_ia32_tzcnt_u32:
13904 case X86::BI__builtin_ia32_tzcnt_u64: {
13905 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
13906 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13908 case X86::BI__builtin_ia32_undef128:
13909 case X86::BI__builtin_ia32_undef256:
13910 case X86::BI__builtin_ia32_undef512:
13911 // The x86 definition of "undef" is not the same as the LLVM definition
13912 // (PR32176). We leave optimizing away an unnecessary zero constant to the
13913 // IR optimizer and backend.
13914 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
13915 // value, we should use that here instead of a zero.
13916 return llvm::Constant::getNullValue(ConvertType(E->getType()));
13917 case X86::BI__builtin_ia32_vec_init_v8qi:
13918 case X86::BI__builtin_ia32_vec_init_v4hi:
13919 case X86::BI__builtin_ia32_vec_init_v2si:
13920 return Builder.CreateBitCast(BuildVector(Ops),
13921 llvm::Type::getX86_MMXTy(getLLVMContext()));
13922 case X86::BI__builtin_ia32_vec_ext_v2si:
13923 case X86::BI__builtin_ia32_vec_ext_v16qi:
13924 case X86::BI__builtin_ia32_vec_ext_v8hi:
13925 case X86::BI__builtin_ia32_vec_ext_v4si:
13926 case X86::BI__builtin_ia32_vec_ext_v4sf:
13927 case X86::BI__builtin_ia32_vec_ext_v2di:
13928 case X86::BI__builtin_ia32_vec_ext_v32qi:
13929 case X86::BI__builtin_ia32_vec_ext_v16hi:
13930 case X86::BI__builtin_ia32_vec_ext_v8si:
13931 case X86::BI__builtin_ia32_vec_ext_v4di: {
13932 unsigned NumElts =
13933 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13934 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
13935 Index &= NumElts - 1;
13936 // These builtins exist so we can ensure the index is an ICE and in range.
13937 // Otherwise we could just do this in the header file.
13938 return Builder.CreateExtractElement(Ops[0], Index);
13940 case X86::BI__builtin_ia32_vec_set_v16qi:
13941 case X86::BI__builtin_ia32_vec_set_v8hi:
13942 case X86::BI__builtin_ia32_vec_set_v4si:
13943 case X86::BI__builtin_ia32_vec_set_v2di:
13944 case X86::BI__builtin_ia32_vec_set_v32qi:
13945 case X86::BI__builtin_ia32_vec_set_v16hi:
13946 case X86::BI__builtin_ia32_vec_set_v8si:
13947 case X86::BI__builtin_ia32_vec_set_v4di: {
13948 unsigned NumElts =
13949 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13950 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
13951 Index &= NumElts - 1;
13952 // These builtins exist so we can ensure the index is an ICE and in range.
13953 // Otherwise we could just do this in the header file.
13954 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
13956 case X86::BI_mm_setcsr:
13957 case X86::BI__builtin_ia32_ldmxcsr: {
13958 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
13959 Builder.CreateStore(Ops[0], Tmp);
13960 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
13961 Tmp.getPointer());
13963 case X86::BI_mm_getcsr:
13964 case X86::BI__builtin_ia32_stmxcsr: {
13965 Address Tmp = CreateMemTemp(E->getType());
13966 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
13967 Tmp.getPointer());
13968 return Builder.CreateLoad(Tmp, "stmxcsr");
13970 case X86::BI__builtin_ia32_xsave:
13971 case X86::BI__builtin_ia32_xsave64:
13972 case X86::BI__builtin_ia32_xrstor:
13973 case X86::BI__builtin_ia32_xrstor64:
13974 case X86::BI__builtin_ia32_xsaveopt:
13975 case X86::BI__builtin_ia32_xsaveopt64:
13976 case X86::BI__builtin_ia32_xrstors:
13977 case X86::BI__builtin_ia32_xrstors64:
13978 case X86::BI__builtin_ia32_xsavec:
13979 case X86::BI__builtin_ia32_xsavec64:
13980 case X86::BI__builtin_ia32_xsaves:
13981 case X86::BI__builtin_ia32_xsaves64:
13982 case X86::BI__builtin_ia32_xsetbv:
13983 case X86::BI_xsetbv: {
13984 Intrinsic::ID ID;
13985 #define INTRINSIC_X86_XSAVE_ID(NAME) \
13986 case X86::BI__builtin_ia32_##NAME: \
13987 ID = Intrinsic::x86_##NAME; \
13988 break
13989 switch (BuiltinID) {
13990 default: llvm_unreachable("Unsupported intrinsic!");
13991 INTRINSIC_X86_XSAVE_ID(xsave);
13992 INTRINSIC_X86_XSAVE_ID(xsave64);
13993 INTRINSIC_X86_XSAVE_ID(xrstor);
13994 INTRINSIC_X86_XSAVE_ID(xrstor64);
13995 INTRINSIC_X86_XSAVE_ID(xsaveopt);
13996 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
13997 INTRINSIC_X86_XSAVE_ID(xrstors);
13998 INTRINSIC_X86_XSAVE_ID(xrstors64);
13999 INTRINSIC_X86_XSAVE_ID(xsavec);
14000 INTRINSIC_X86_XSAVE_ID(xsavec64);
14001 INTRINSIC_X86_XSAVE_ID(xsaves);
14002 INTRINSIC_X86_XSAVE_ID(xsaves64);
14003 INTRINSIC_X86_XSAVE_ID(xsetbv);
14004 case X86::BI_xsetbv:
14005 ID = Intrinsic::x86_xsetbv;
14006 break;
14007 }
14008 #undef INTRINSIC_X86_XSAVE_ID
14009 Value *Mhi = Builder.CreateTrunc(
14010 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
14011 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
14012 Ops[1] = Mhi;
14013 Ops.push_back(Mlo);
14014 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
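// The 64-bit mask argument is split into two i32 halves above (high word,
// then low word), matching the EDX:EAX pair the underlying
// xsave/xrstor/xsetbv family of instructions consumes.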
14016 case X86::BI__builtin_ia32_xgetbv:
14017 case X86::BI_xgetbv:
14018 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
14019 case X86::BI__builtin_ia32_storedqudi128_mask:
14020 case X86::BI__builtin_ia32_storedqusi128_mask:
14021 case X86::BI__builtin_ia32_storedquhi128_mask:
14022 case X86::BI__builtin_ia32_storedquqi128_mask:
14023 case X86::BI__builtin_ia32_storeupd128_mask:
14024 case X86::BI__builtin_ia32_storeups128_mask:
14025 case X86::BI__builtin_ia32_storedqudi256_mask:
14026 case X86::BI__builtin_ia32_storedqusi256_mask:
14027 case X86::BI__builtin_ia32_storedquhi256_mask:
14028 case X86::BI__builtin_ia32_storedquqi256_mask:
14029 case X86::BI__builtin_ia32_storeupd256_mask:
14030 case X86::BI__builtin_ia32_storeups256_mask:
14031 case X86::BI__builtin_ia32_storedqudi512_mask:
14032 case X86::BI__builtin_ia32_storedqusi512_mask:
14033 case X86::BI__builtin_ia32_storedquhi512_mask:
14034 case X86::BI__builtin_ia32_storedquqi512_mask:
14035 case X86::BI__builtin_ia32_storeupd512_mask:
14036 case X86::BI__builtin_ia32_storeups512_mask:
14037 return EmitX86MaskedStore(*this, Ops, Align(1));
14039 case X86::BI__builtin_ia32_storesh128_mask:
14040 case X86::BI__builtin_ia32_storess128_mask:
14041 case X86::BI__builtin_ia32_storesd128_mask:
14042 return EmitX86MaskedStore(*this, Ops, Align(1));
14044 case X86::BI__builtin_ia32_vpopcntb_128:
14045 case X86::BI__builtin_ia32_vpopcntd_128:
14046 case X86::BI__builtin_ia32_vpopcntq_128:
14047 case X86::BI__builtin_ia32_vpopcntw_128:
14048 case X86::BI__builtin_ia32_vpopcntb_256:
14049 case X86::BI__builtin_ia32_vpopcntd_256:
14050 case X86::BI__builtin_ia32_vpopcntq_256:
14051 case X86::BI__builtin_ia32_vpopcntw_256:
14052 case X86::BI__builtin_ia32_vpopcntb_512:
14053 case X86::BI__builtin_ia32_vpopcntd_512:
14054 case X86::BI__builtin_ia32_vpopcntq_512:
14055 case X86::BI__builtin_ia32_vpopcntw_512: {
14056 llvm::Type *ResultType = ConvertType(E->getType());
14057 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
14058 return Builder.CreateCall(F, Ops);
14060 case X86::BI__builtin_ia32_cvtmask2b128:
14061 case X86::BI__builtin_ia32_cvtmask2b256:
14062 case X86::BI__builtin_ia32_cvtmask2b512:
14063 case X86::BI__builtin_ia32_cvtmask2w128:
14064 case X86::BI__builtin_ia32_cvtmask2w256:
14065 case X86::BI__builtin_ia32_cvtmask2w512:
14066 case X86::BI__builtin_ia32_cvtmask2d128:
14067 case X86::BI__builtin_ia32_cvtmask2d256:
14068 case X86::BI__builtin_ia32_cvtmask2d512:
14069 case X86::BI__builtin_ia32_cvtmask2q128:
14070 case X86::BI__builtin_ia32_cvtmask2q256:
14071 case X86::BI__builtin_ia32_cvtmask2q512:
14072 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
14074 case X86::BI__builtin_ia32_cvtb2mask128:
14075 case X86::BI__builtin_ia32_cvtb2mask256:
14076 case X86::BI__builtin_ia32_cvtb2mask512:
14077 case X86::BI__builtin_ia32_cvtw2mask128:
14078 case X86::BI__builtin_ia32_cvtw2mask256:
14079 case X86::BI__builtin_ia32_cvtw2mask512:
14080 case X86::BI__builtin_ia32_cvtd2mask128:
14081 case X86::BI__builtin_ia32_cvtd2mask256:
14082 case X86::BI__builtin_ia32_cvtd2mask512:
14083 case X86::BI__builtin_ia32_cvtq2mask128:
14084 case X86::BI__builtin_ia32_cvtq2mask256:
14085 case X86::BI__builtin_ia32_cvtq2mask512:
14086 return EmitX86ConvertToMask(*this, Ops[0]);
14088 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
14089 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
14090 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
14091 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
14092 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
14093 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
14094 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
14095 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
14096 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
14097 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
14098 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
14099 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
14100 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
14101 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
14103 case X86::BI__builtin_ia32_vfmaddss3:
14104 case X86::BI__builtin_ia32_vfmaddsd3:
14105 case X86::BI__builtin_ia32_vfmaddsh3_mask:
14106 case X86::BI__builtin_ia32_vfmaddss3_mask:
14107 case X86::BI__builtin_ia32_vfmaddsd3_mask:
14108 return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
14109 case X86::BI__builtin_ia32_vfmaddss:
14110 case X86::BI__builtin_ia32_vfmaddsd:
14111 return EmitScalarFMAExpr(*this, E, Ops,
14112 Constant::getNullValue(Ops[0]->getType()));
14113 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
14114 case X86::BI__builtin_ia32_vfmaddss3_maskz:
14115 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
14116 return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
14117 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
14118 case X86::BI__builtin_ia32_vfmaddss3_mask3:
14119 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
14120 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
14121 case X86::BI__builtin_ia32_vfmsubsh3_mask3:
14122 case X86::BI__builtin_ia32_vfmsubss3_mask3:
14123 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
14124 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
14125 /*NegAcc*/ true);
14126 case X86::BI__builtin_ia32_vfmaddph:
14127 case X86::BI__builtin_ia32_vfmaddps:
14128 case X86::BI__builtin_ia32_vfmaddpd:
14129 case X86::BI__builtin_ia32_vfmaddph256:
14130 case X86::BI__builtin_ia32_vfmaddps256:
14131 case X86::BI__builtin_ia32_vfmaddpd256:
14132 case X86::BI__builtin_ia32_vfmaddph512_mask:
14133 case X86::BI__builtin_ia32_vfmaddph512_maskz:
14134 case X86::BI__builtin_ia32_vfmaddph512_mask3:
14135 case X86::BI__builtin_ia32_vfmaddps512_mask:
14136 case X86::BI__builtin_ia32_vfmaddps512_maskz:
14137 case X86::BI__builtin_ia32_vfmaddps512_mask3:
14138 case X86::BI__builtin_ia32_vfmsubps512_mask3:
14139 case X86::BI__builtin_ia32_vfmaddpd512_mask:
14140 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
14141 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
14142 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
14143 case X86::BI__builtin_ia32_vfmsubph512_mask3:
14144 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
14145 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
14146 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
14147 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
14148 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
14149 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
14150 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
14151 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
14152 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
14153 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
14154 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
14155 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
14156 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
14157 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
14159 case X86::BI__builtin_ia32_movdqa32store128_mask:
14160 case X86::BI__builtin_ia32_movdqa64store128_mask:
14161 case X86::BI__builtin_ia32_storeaps128_mask:
14162 case X86::BI__builtin_ia32_storeapd128_mask:
14163 case X86::BI__builtin_ia32_movdqa32store256_mask:
14164 case X86::BI__builtin_ia32_movdqa64store256_mask:
14165 case X86::BI__builtin_ia32_storeaps256_mask:
14166 case X86::BI__builtin_ia32_storeapd256_mask:
14167 case X86::BI__builtin_ia32_movdqa32store512_mask:
14168 case X86::BI__builtin_ia32_movdqa64store512_mask:
14169 case X86::BI__builtin_ia32_storeaps512_mask:
14170 case X86::BI__builtin_ia32_storeapd512_mask:
14171 return EmitX86MaskedStore(
14172 *this, Ops,
14173 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
14175 case X86::BI__builtin_ia32_loadups128_mask:
14176 case X86::BI__builtin_ia32_loadups256_mask:
14177 case X86::BI__builtin_ia32_loadups512_mask:
14178 case X86::BI__builtin_ia32_loadupd128_mask:
14179 case X86::BI__builtin_ia32_loadupd256_mask:
14180 case X86::BI__builtin_ia32_loadupd512_mask:
14181 case X86::BI__builtin_ia32_loaddquqi128_mask:
14182 case X86::BI__builtin_ia32_loaddquqi256_mask:
14183 case X86::BI__builtin_ia32_loaddquqi512_mask:
14184 case X86::BI__builtin_ia32_loaddquhi128_mask:
14185 case X86::BI__builtin_ia32_loaddquhi256_mask:
14186 case X86::BI__builtin_ia32_loaddquhi512_mask:
14187 case X86::BI__builtin_ia32_loaddqusi128_mask:
14188 case X86::BI__builtin_ia32_loaddqusi256_mask:
14189 case X86::BI__builtin_ia32_loaddqusi512_mask:
14190 case X86::BI__builtin_ia32_loaddqudi128_mask:
14191 case X86::BI__builtin_ia32_loaddqudi256_mask:
14192 case X86::BI__builtin_ia32_loaddqudi512_mask:
14193 return EmitX86MaskedLoad(*this, Ops, Align(1));
14195 case X86::BI__builtin_ia32_loadsh128_mask:
14196 case X86::BI__builtin_ia32_loadss128_mask:
14197 case X86::BI__builtin_ia32_loadsd128_mask:
14198 return EmitX86MaskedLoad(*this, Ops, Align(1));
14200 case X86::BI__builtin_ia32_loadaps128_mask:
14201 case X86::BI__builtin_ia32_loadaps256_mask:
14202 case X86::BI__builtin_ia32_loadaps512_mask:
14203 case X86::BI__builtin_ia32_loadapd128_mask:
14204 case X86::BI__builtin_ia32_loadapd256_mask:
14205 case X86::BI__builtin_ia32_loadapd512_mask:
14206 case X86::BI__builtin_ia32_movdqa32load128_mask:
14207 case X86::BI__builtin_ia32_movdqa32load256_mask:
14208 case X86::BI__builtin_ia32_movdqa32load512_mask:
14209 case X86::BI__builtin_ia32_movdqa64load128_mask:
14210 case X86::BI__builtin_ia32_movdqa64load256_mask:
14211 case X86::BI__builtin_ia32_movdqa64load512_mask:
14212 return EmitX86MaskedLoad(
14213 *this, Ops,
14214 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
14216 case X86::BI__builtin_ia32_expandloaddf128_mask:
14217 case X86::BI__builtin_ia32_expandloaddf256_mask:
14218 case X86::BI__builtin_ia32_expandloaddf512_mask:
14219 case X86::BI__builtin_ia32_expandloadsf128_mask:
14220 case X86::BI__builtin_ia32_expandloadsf256_mask:
14221 case X86::BI__builtin_ia32_expandloadsf512_mask:
14222 case X86::BI__builtin_ia32_expandloaddi128_mask:
14223 case X86::BI__builtin_ia32_expandloaddi256_mask:
14224 case X86::BI__builtin_ia32_expandloaddi512_mask:
14225 case X86::BI__builtin_ia32_expandloadsi128_mask:
14226 case X86::BI__builtin_ia32_expandloadsi256_mask:
14227 case X86::BI__builtin_ia32_expandloadsi512_mask:
14228 case X86::BI__builtin_ia32_expandloadhi128_mask:
14229 case X86::BI__builtin_ia32_expandloadhi256_mask:
14230 case X86::BI__builtin_ia32_expandloadhi512_mask:
14231 case X86::BI__builtin_ia32_expandloadqi128_mask:
14232 case X86::BI__builtin_ia32_expandloadqi256_mask:
14233 case X86::BI__builtin_ia32_expandloadqi512_mask:
14234 return EmitX86ExpandLoad(*this, Ops);
14236 case X86::BI__builtin_ia32_compressstoredf128_mask:
14237 case X86::BI__builtin_ia32_compressstoredf256_mask:
14238 case X86::BI__builtin_ia32_compressstoredf512_mask:
14239 case X86::BI__builtin_ia32_compressstoresf128_mask:
14240 case X86::BI__builtin_ia32_compressstoresf256_mask:
14241 case X86::BI__builtin_ia32_compressstoresf512_mask:
14242 case X86::BI__builtin_ia32_compressstoredi128_mask:
14243 case X86::BI__builtin_ia32_compressstoredi256_mask:
14244 case X86::BI__builtin_ia32_compressstoredi512_mask:
14245 case X86::BI__builtin_ia32_compressstoresi128_mask:
14246 case X86::BI__builtin_ia32_compressstoresi256_mask:
14247 case X86::BI__builtin_ia32_compressstoresi512_mask:
14248 case X86::BI__builtin_ia32_compressstorehi128_mask:
14249 case X86::BI__builtin_ia32_compressstorehi256_mask:
14250 case X86::BI__builtin_ia32_compressstorehi512_mask:
14251 case X86::BI__builtin_ia32_compressstoreqi128_mask:
14252 case X86::BI__builtin_ia32_compressstoreqi256_mask:
14253 case X86::BI__builtin_ia32_compressstoreqi512_mask:
14254 return EmitX86CompressStore(*this, Ops);
14256 case X86::BI__builtin_ia32_expanddf128_mask:
14257 case X86::BI__builtin_ia32_expanddf256_mask:
14258 case X86::BI__builtin_ia32_expanddf512_mask:
14259 case X86::BI__builtin_ia32_expandsf128_mask:
14260 case X86::BI__builtin_ia32_expandsf256_mask:
14261 case X86::BI__builtin_ia32_expandsf512_mask:
14262 case X86::BI__builtin_ia32_expanddi128_mask:
14263 case X86::BI__builtin_ia32_expanddi256_mask:
14264 case X86::BI__builtin_ia32_expanddi512_mask:
14265 case X86::BI__builtin_ia32_expandsi128_mask:
14266 case X86::BI__builtin_ia32_expandsi256_mask:
14267 case X86::BI__builtin_ia32_expandsi512_mask:
14268 case X86::BI__builtin_ia32_expandhi128_mask:
14269 case X86::BI__builtin_ia32_expandhi256_mask:
14270 case X86::BI__builtin_ia32_expandhi512_mask:
14271 case X86::BI__builtin_ia32_expandqi128_mask:
14272 case X86::BI__builtin_ia32_expandqi256_mask:
14273 case X86::BI__builtin_ia32_expandqi512_mask:
14274 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
14276 case X86::BI__builtin_ia32_compressdf128_mask:
14277 case X86::BI__builtin_ia32_compressdf256_mask:
14278 case X86::BI__builtin_ia32_compressdf512_mask:
14279 case X86::BI__builtin_ia32_compresssf128_mask:
14280 case X86::BI__builtin_ia32_compresssf256_mask:
14281 case X86::BI__builtin_ia32_compresssf512_mask:
14282 case X86::BI__builtin_ia32_compressdi128_mask:
14283 case X86::BI__builtin_ia32_compressdi256_mask:
14284 case X86::BI__builtin_ia32_compressdi512_mask:
14285 case X86::BI__builtin_ia32_compresssi128_mask:
14286 case X86::BI__builtin_ia32_compresssi256_mask:
14287 case X86::BI__builtin_ia32_compresssi512_mask:
14288 case X86::BI__builtin_ia32_compresshi128_mask:
14289 case X86::BI__builtin_ia32_compresshi256_mask:
14290 case X86::BI__builtin_ia32_compresshi512_mask:
14291 case X86::BI__builtin_ia32_compressqi128_mask:
14292 case X86::BI__builtin_ia32_compressqi256_mask:
14293 case X86::BI__builtin_ia32_compressqi512_mask:
14294 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
14296 case X86::BI__builtin_ia32_gather3div2df:
14297 case X86::BI__builtin_ia32_gather3div2di:
14298 case X86::BI__builtin_ia32_gather3div4df:
14299 case X86::BI__builtin_ia32_gather3div4di:
14300 case X86::BI__builtin_ia32_gather3div4sf:
14301 case X86::BI__builtin_ia32_gather3div4si:
14302 case X86::BI__builtin_ia32_gather3div8sf:
14303 case X86::BI__builtin_ia32_gather3div8si:
14304 case X86::BI__builtin_ia32_gather3siv2df:
14305 case X86::BI__builtin_ia32_gather3siv2di:
14306 case X86::BI__builtin_ia32_gather3siv4df:
14307 case X86::BI__builtin_ia32_gather3siv4di:
14308 case X86::BI__builtin_ia32_gather3siv4sf:
14309 case X86::BI__builtin_ia32_gather3siv4si:
14310 case X86::BI__builtin_ia32_gather3siv8sf:
14311 case X86::BI__builtin_ia32_gather3siv8si:
14312 case X86::BI__builtin_ia32_gathersiv8df:
14313 case X86::BI__builtin_ia32_gathersiv16sf:
14314 case X86::BI__builtin_ia32_gatherdiv8df:
14315 case X86::BI__builtin_ia32_gatherdiv16sf:
14316 case X86::BI__builtin_ia32_gathersiv8di:
14317 case X86::BI__builtin_ia32_gathersiv16si:
14318 case X86::BI__builtin_ia32_gatherdiv8di:
14319 case X86::BI__builtin_ia32_gatherdiv16si: {
14320 Intrinsic::ID IID;
14321 switch (BuiltinID) {
14322 default: llvm_unreachable("Unexpected builtin");
14323 case X86::BI__builtin_ia32_gather3div2df:
14324 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
14325 break;
14326 case X86::BI__builtin_ia32_gather3div2di:
14327 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
14328 break;
14329 case X86::BI__builtin_ia32_gather3div4df:
14330 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
14331 break;
14332 case X86::BI__builtin_ia32_gather3div4di:
14333 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
14334 break;
14335 case X86::BI__builtin_ia32_gather3div4sf:
14336 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
14337 break;
14338 case X86::BI__builtin_ia32_gather3div4si:
14339 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
14340 break;
14341 case X86::BI__builtin_ia32_gather3div8sf:
14342 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
14343 break;
14344 case X86::BI__builtin_ia32_gather3div8si:
14345 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
14346 break;
14347 case X86::BI__builtin_ia32_gather3siv2df:
14348 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
14349 break;
14350 case X86::BI__builtin_ia32_gather3siv2di:
14351 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
14352 break;
14353 case X86::BI__builtin_ia32_gather3siv4df:
14354 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
14355 break;
14356 case X86::BI__builtin_ia32_gather3siv4di:
14357 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
14358 break;
14359 case X86::BI__builtin_ia32_gather3siv4sf:
14360 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
14361 break;
14362 case X86::BI__builtin_ia32_gather3siv4si:
14363 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
14364 break;
14365 case X86::BI__builtin_ia32_gather3siv8sf:
14366 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
14367 break;
14368 case X86::BI__builtin_ia32_gather3siv8si:
14369 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
14370 break;
14371 case X86::BI__builtin_ia32_gathersiv8df:
14372 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
14373 break;
14374 case X86::BI__builtin_ia32_gathersiv16sf:
14375 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
14376 break;
14377 case X86::BI__builtin_ia32_gatherdiv8df:
14378 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
14379 break;
14380 case X86::BI__builtin_ia32_gatherdiv16sf:
14381 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
14382 break;
14383 case X86::BI__builtin_ia32_gathersiv8di:
14384 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
14385 break;
14386 case X86::BI__builtin_ia32_gathersiv16si:
14387 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
14388 break;
14389 case X86::BI__builtin_ia32_gatherdiv8di:
14390 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
14391 break;
14392 case X86::BI__builtin_ia32_gatherdiv16si:
14393 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
14394 break;
14395 }
14397 unsigned MinElts = std::min(
14398 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
14399 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
14400 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
14401 Function *Intr = CGM.getIntrinsic(IID);
14402 return Builder.CreateCall(Intr, Ops);
14405 case X86::BI__builtin_ia32_scattersiv8df:
14406 case X86::BI__builtin_ia32_scattersiv16sf:
14407 case X86::BI__builtin_ia32_scatterdiv8df:
14408 case X86::BI__builtin_ia32_scatterdiv16sf:
14409 case X86::BI__builtin_ia32_scattersiv8di:
14410 case X86::BI__builtin_ia32_scattersiv16si:
14411 case X86::BI__builtin_ia32_scatterdiv8di:
14412 case X86::BI__builtin_ia32_scatterdiv16si:
14413 case X86::BI__builtin_ia32_scatterdiv2df:
14414 case X86::BI__builtin_ia32_scatterdiv2di:
14415 case X86::BI__builtin_ia32_scatterdiv4df:
14416 case X86::BI__builtin_ia32_scatterdiv4di:
14417 case X86::BI__builtin_ia32_scatterdiv4sf:
14418 case X86::BI__builtin_ia32_scatterdiv4si:
14419 case X86::BI__builtin_ia32_scatterdiv8sf:
14420 case X86::BI__builtin_ia32_scatterdiv8si:
14421 case X86::BI__builtin_ia32_scattersiv2df:
14422 case X86::BI__builtin_ia32_scattersiv2di:
14423 case X86::BI__builtin_ia32_scattersiv4df:
14424 case X86::BI__builtin_ia32_scattersiv4di:
14425 case X86::BI__builtin_ia32_scattersiv4sf:
14426 case X86::BI__builtin_ia32_scattersiv4si:
14427 case X86::BI__builtin_ia32_scattersiv8sf:
14428 case X86::BI__builtin_ia32_scattersiv8si: {
14429 Intrinsic::ID IID;
14430 switch (BuiltinID) {
14431 default: llvm_unreachable("Unexpected builtin");
14432 case X86::BI__builtin_ia32_scattersiv8df:
14433 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
14434 break;
14435 case X86::BI__builtin_ia32_scattersiv16sf:
14436 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
14437 break;
14438 case X86::BI__builtin_ia32_scatterdiv8df:
14439 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
14440 break;
14441 case X86::BI__builtin_ia32_scatterdiv16sf:
14442 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
14443 break;
14444 case X86::BI__builtin_ia32_scattersiv8di:
14445 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
14446 break;
14447 case X86::BI__builtin_ia32_scattersiv16si:
14448 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
14449 break;
14450 case X86::BI__builtin_ia32_scatterdiv8di:
14451 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
14452 break;
14453 case X86::BI__builtin_ia32_scatterdiv16si:
14454 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
14455 break;
14456 case X86::BI__builtin_ia32_scatterdiv2df:
14457 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
14458 break;
14459 case X86::BI__builtin_ia32_scatterdiv2di:
14460 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
14461 break;
14462 case X86::BI__builtin_ia32_scatterdiv4df:
14463 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
14464 break;
14465 case X86::BI__builtin_ia32_scatterdiv4di:
14466 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
14467 break;
14468 case X86::BI__builtin_ia32_scatterdiv4sf:
14469 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
14470 break;
14471 case X86::BI__builtin_ia32_scatterdiv4si:
14472 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
14473 break;
14474 case X86::BI__builtin_ia32_scatterdiv8sf:
14475 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
14476 break;
14477 case X86::BI__builtin_ia32_scatterdiv8si:
14478 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
14479 break;
14480 case X86::BI__builtin_ia32_scattersiv2df:
14481 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
14482 break;
14483 case X86::BI__builtin_ia32_scattersiv2di:
14484 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
14485 break;
14486 case X86::BI__builtin_ia32_scattersiv4df:
14487 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
14488 break;
14489 case X86::BI__builtin_ia32_scattersiv4di:
14490 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
14491 break;
14492 case X86::BI__builtin_ia32_scattersiv4sf:
14493 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
14494 break;
14495 case X86::BI__builtin_ia32_scattersiv4si:
14496 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
14497 break;
14498 case X86::BI__builtin_ia32_scattersiv8sf:
14499 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
14500 break;
14501 case X86::BI__builtin_ia32_scattersiv8si:
14502 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
14503 break;
14504 }
14506 unsigned MinElts = std::min(
14507 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
14508 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
14509 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
14510 Function *Intr = CGM.getIntrinsic(IID);
14511 return Builder.CreateCall(Intr, Ops);
14514 case X86::BI__builtin_ia32_vextractf128_pd256:
14515 case X86::BI__builtin_ia32_vextractf128_ps256:
14516 case X86::BI__builtin_ia32_vextractf128_si256:
14517 case X86::BI__builtin_ia32_extract128i256:
14518 case X86::BI__builtin_ia32_extractf64x4_mask:
14519 case X86::BI__builtin_ia32_extractf32x4_mask:
14520 case X86::BI__builtin_ia32_extracti64x4_mask:
14521 case X86::BI__builtin_ia32_extracti32x4_mask:
14522 case X86::BI__builtin_ia32_extractf32x8_mask:
14523 case X86::BI__builtin_ia32_extracti32x8_mask:
14524 case X86::BI__builtin_ia32_extractf32x4_256_mask:
14525 case X86::BI__builtin_ia32_extracti32x4_256_mask:
14526 case X86::BI__builtin_ia32_extractf64x2_256_mask:
14527 case X86::BI__builtin_ia32_extracti64x2_256_mask:
14528 case X86::BI__builtin_ia32_extractf64x2_512_mask:
14529 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
14530 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
14531 unsigned NumElts = DstTy->getNumElements();
14532 unsigned SrcNumElts =
14533 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14534 unsigned SubVectors = SrcNumElts / NumElts;
14535 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
14536 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
14537 Index &= SubVectors - 1; // Remove any extra bits.
14538 Index *= NumElts;
14540 int Indices[16];
14541 for (unsigned i = 0; i != NumElts; ++i)
14542 Indices[i] = i + Index;
14544 Value *Res = Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14545 "extract");
14547 if (Ops.size() == 4)
14548 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
14550 return Res;
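// Subvector extraction is modeled as a shufflevector selecting NumElts
// consecutive elements starting at Index * NumElts; e.g. pulling the upper
// 128 bits out of a <4 x double> uses indices <2, 3>. The masked variants
// then blend that result with the pass-through operand via EmitX86Select.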
14552 case X86::BI__builtin_ia32_vinsertf128_pd256:
14553 case X86::BI__builtin_ia32_vinsertf128_ps256:
14554 case X86::BI__builtin_ia32_vinsertf128_si256:
14555 case X86::BI__builtin_ia32_insert128i256:
14556 case X86::BI__builtin_ia32_insertf64x4:
14557 case X86::BI__builtin_ia32_insertf32x4:
14558 case X86::BI__builtin_ia32_inserti64x4:
14559 case X86::BI__builtin_ia32_inserti32x4:
14560 case X86::BI__builtin_ia32_insertf32x8:
14561 case X86::BI__builtin_ia32_inserti32x8:
14562 case X86::BI__builtin_ia32_insertf32x4_256:
14563 case X86::BI__builtin_ia32_inserti32x4_256:
14564 case X86::BI__builtin_ia32_insertf64x2_256:
14565 case X86::BI__builtin_ia32_inserti64x2_256:
14566 case X86::BI__builtin_ia32_insertf64x2_512:
14567 case X86::BI__builtin_ia32_inserti64x2_512: {
14568 unsigned DstNumElts =
14569 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14570 unsigned SrcNumElts =
14571 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
14572 unsigned SubVectors = DstNumElts / SrcNumElts;
14573 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
14574 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
14575 Index &= SubVectors - 1; // Remove any extra bits.
14576 Index *= SrcNumElts;
14578 int Indices[16];
14579 for (unsigned i = 0; i != DstNumElts; ++i)
14580 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
14582 Value *Op1 = Builder.CreateShuffleVector(
14583 Ops[1], ArrayRef(Indices, DstNumElts), "widen");
14585 for (unsigned i = 0; i != DstNumElts; ++i) {
14586 if (i >= Index && i < (Index + SrcNumElts))
14587 Indices[i] = (i - Index) + DstNumElts;
14588 else
14589 Indices[i] = i;
14592 return Builder.CreateShuffleVector(Ops[0], Op1,
14593 ArrayRef(Indices, DstNumElts), "insert");
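// Subvector insertion uses two shuffles: the first widens the small source
// to the destination element count (the extra lanes are never selected), and
// the second splices it over Ops[0], taking the widened source for the
// [Index, Index + SrcNumElts) window and Ops[0] everywhere else.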
14595 case X86::BI__builtin_ia32_pmovqd512_mask:
14596 case X86::BI__builtin_ia32_pmovwb512_mask: {
14597 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
14598 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14600 case X86::BI__builtin_ia32_pmovdb512_mask:
14601 case X86::BI__builtin_ia32_pmovdw512_mask:
14602 case X86::BI__builtin_ia32_pmovqw512_mask: {
14603 if (const auto *C = dyn_cast<Constant>(Ops[2]))
14604 if (C->isAllOnesValue())
14605 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
14607 Intrinsic::ID IID;
14608 switch (BuiltinID) {
14609 default: llvm_unreachable("Unsupported intrinsic!");
14610 case X86::BI__builtin_ia32_pmovdb512_mask:
14611 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
14612 break;
14613 case X86::BI__builtin_ia32_pmovdw512_mask:
14614 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
14615 break;
14616 case X86::BI__builtin_ia32_pmovqw512_mask:
14617 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
14618 break;
14619 }
14621 Function *Intr = CGM.getIntrinsic(IID);
14622 return Builder.CreateCall(Intr, Ops);
14624 case X86::BI__builtin_ia32_pblendw128:
14625 case X86::BI__builtin_ia32_blendpd:
14626 case X86::BI__builtin_ia32_blendps:
14627 case X86::BI__builtin_ia32_blendpd256:
14628 case X86::BI__builtin_ia32_blendps256:
14629 case X86::BI__builtin_ia32_pblendw256:
14630 case X86::BI__builtin_ia32_pblendd128:
14631 case X86::BI__builtin_ia32_pblendd256: {
14632 unsigned NumElts =
14633 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14634 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14636 int Indices[16];
14637 // If there are more than 8 elements, the immediate is used twice so make
14638 // sure we handle that.
14639 for (unsigned i = 0; i != NumElts; ++i)
14640 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
14642 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14643 ArrayRef(Indices, NumElts), "blend");
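// Result element i takes Ops[1][i] when bit (i % 8) of the immediate is set
// and Ops[0][i] otherwise; the "% 8" covers the 256-bit forms, where the
// 8-bit immediate is applied to both halves.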
14645 case X86::BI__builtin_ia32_pshuflw:
14646 case X86::BI__builtin_ia32_pshuflw256:
14647 case X86::BI__builtin_ia32_pshuflw512: {
14648 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14649 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14650 unsigned NumElts = Ty->getNumElements();
14652 // Splat the 8 bits of the immediate 4 times so the loop below can wrap around.
14653 Imm = (Imm & 0xff) * 0x01010101;
14655 int Indices[32];
14656 for (unsigned l = 0; l != NumElts; l += 8) {
14657 for (unsigned i = 0; i != 4; ++i) {
14658 Indices[l + i] = l + (Imm & 3);
14659 Imm >>= 2;
14661 for (unsigned i = 4; i != 8; ++i)
14662 Indices[l + i] = l + i;
14665 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14666 "pshuflw");
14668 case X86::BI__builtin_ia32_pshufhw:
14669 case X86::BI__builtin_ia32_pshufhw256:
14670 case X86::BI__builtin_ia32_pshufhw512: {
14671 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14672 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14673 unsigned NumElts = Ty->getNumElements();
14675 // Splat the 8 bits of the immediate 4 times so the loop below can wrap around.
14676 Imm = (Imm & 0xff) * 0x01010101;
14678 int Indices[32];
14679 for (unsigned l = 0; l != NumElts; l += 8) {
14680 for (unsigned i = 0; i != 4; ++i)
14681 Indices[l + i] = l + i;
14682 for (unsigned i = 4; i != 8; ++i) {
14683 Indices[l + i] = l + 4 + (Imm & 3);
14684 Imm >>= 2;
14688 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14689 "pshufhw");
14691 case X86::BI__builtin_ia32_pshufd:
14692 case X86::BI__builtin_ia32_pshufd256:
14693 case X86::BI__builtin_ia32_pshufd512:
14694 case X86::BI__builtin_ia32_vpermilpd:
14695 case X86::BI__builtin_ia32_vpermilps:
14696 case X86::BI__builtin_ia32_vpermilpd256:
14697 case X86::BI__builtin_ia32_vpermilps256:
14698 case X86::BI__builtin_ia32_vpermilpd512:
14699 case X86::BI__builtin_ia32_vpermilps512: {
14700 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14701 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14702 unsigned NumElts = Ty->getNumElements();
14703 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
14704 unsigned NumLaneElts = NumElts / NumLanes;
14706 // Splat the 8 bits of the immediate 4 times so the loop below can wrap around.
14707 Imm = (Imm & 0xff) * 0x01010101;
14709 int Indices[16];
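// The Imm % NumLaneElts / Imm /= NumLaneElts pair consumes the immediate in
// base-NumLaneElts digits: two bits per element for four-element lanes
// (pshufd/vpermilps) and one bit per element for two-element lanes (vpermilpd).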
14710 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14711 for (unsigned i = 0; i != NumLaneElts; ++i) {
14712 Indices[i + l] = (Imm % NumLaneElts) + l;
14713 Imm /= NumLaneElts;
14717 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14718 "permil");
14720 case X86::BI__builtin_ia32_shufpd:
14721 case X86::BI__builtin_ia32_shufpd256:
14722 case X86::BI__builtin_ia32_shufpd512:
14723 case X86::BI__builtin_ia32_shufps:
14724 case X86::BI__builtin_ia32_shufps256:
14725 case X86::BI__builtin_ia32_shufps512: {
14726 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14727 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14728 unsigned NumElts = Ty->getNumElements();
14729 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
14730 unsigned NumLaneElts = NumElts / NumLanes;
14732 // Splat the 8 bits of the immediate 4 times to help the loop wrap around.
14733 Imm = (Imm & 0xff) * 0x01010101;
14735 int Indices[16];
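// The low half of each result lane is selected from Ops[0] and the high half
// from Ops[1]; adding NumElts redirects an index to the second shuffle source.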
14736 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14737 for (unsigned i = 0; i != NumLaneElts; ++i) {
14738 unsigned Index = Imm % NumLaneElts;
14739 Imm /= NumLaneElts;
14740 if (i >= (NumLaneElts / 2))
14741 Index += NumElts;
14742 Indices[l + i] = l + Index;
14746 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14747 ArrayRef(Indices, NumElts), "shufp");
14749 case X86::BI__builtin_ia32_permdi256:
14750 case X86::BI__builtin_ia32_permdf256:
14751 case X86::BI__builtin_ia32_permdi512:
14752 case X86::BI__builtin_ia32_permdf512: {
14753 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14754 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14755 unsigned NumElts = Ty->getNumElements();
14757 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
14758 int Indices[8];
14759 for (unsigned l = 0; l != NumElts; l += 4)
14760 for (unsigned i = 0; i != 4; ++i)
14761 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
14763 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14764 "perm");
14766 case X86::BI__builtin_ia32_palignr128:
14767 case X86::BI__builtin_ia32_palignr256:
14768 case X86::BI__builtin_ia32_palignr512: {
14769 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
14771 unsigned NumElts =
14772 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14773 assert(NumElts % 16 == 0);
14775 // If palignr is shifting the pair of vectors more than the size of two
14776 // lanes, emit zero.
14777 if (ShiftVal >= 32)
14778 return llvm::Constant::getNullValue(ConvertType(E->getType()));
14780 // If palignr is shifting the pair of input vectors more than one lane,
14781 // but less than two lanes, convert to shifting in zeroes.
14782 if (ShiftVal > 16) {
14783 ShiftVal -= 16;
14784 Ops[1] = Ops[0];
14785 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
14788 int Indices[64];
14789 // 256/512-bit palignr operates on 128-bit lanes, so handle each lane separately.
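// Within each lane, result byte i is Ops[1]'s byte at offset i + ShiftVal while
// that offset stays inside the lane, then spills over into Ops[0] (the second
// shuffle operand), matching a right shift of the concatenated pair.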
14790 for (unsigned l = 0; l != NumElts; l += 16) {
14791 for (unsigned i = 0; i != 16; ++i) {
14792 unsigned Idx = ShiftVal + i;
14793 if (Idx >= 16)
14794 Idx += NumElts - 16; // End of lane, switch operand.
14795 Indices[l + i] = Idx + l;
14799 return Builder.CreateShuffleVector(Ops[1], Ops[0],
14800 ArrayRef(Indices, NumElts), "palignr");
14802 case X86::BI__builtin_ia32_alignd128:
14803 case X86::BI__builtin_ia32_alignd256:
14804 case X86::BI__builtin_ia32_alignd512:
14805 case X86::BI__builtin_ia32_alignq128:
14806 case X86::BI__builtin_ia32_alignq256:
14807 case X86::BI__builtin_ia32_alignq512: {
14808 unsigned NumElts =
14809 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14810 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
14812 // Mask the shift amount to the width of the vector.
14813 ShiftVal &= NumElts - 1;
14815 int Indices[16];
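// This shifts the concatenation Ops[0]:Ops[1] right by ShiftVal elements:
// indices that run past the end of Ops[1] fall through to Ops[0], the second
// shuffle operand.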
14816 for (unsigned i = 0; i != NumElts; ++i)
14817 Indices[i] = i + ShiftVal;
14819 return Builder.CreateShuffleVector(Ops[1], Ops[0],
14820 ArrayRef(Indices, NumElts), "valign");
14822 case X86::BI__builtin_ia32_shuf_f32x4_256:
14823 case X86::BI__builtin_ia32_shuf_f64x2_256:
14824 case X86::BI__builtin_ia32_shuf_i32x4_256:
14825 case X86::BI__builtin_ia32_shuf_i64x2_256:
14826 case X86::BI__builtin_ia32_shuf_f32x4:
14827 case X86::BI__builtin_ia32_shuf_f64x2:
14828 case X86::BI__builtin_ia32_shuf_i32x4:
14829 case X86::BI__builtin_ia32_shuf_i64x2: {
14830 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14831 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14832 unsigned NumElts = Ty->getNumElements();
14833 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
14834 unsigned NumLaneElts = NumElts / NumLanes;
14836 int Indices[16];
14837 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14838 unsigned Index = (Imm % NumLanes) * NumLaneElts;
14839 Imm /= NumLanes; // Discard the bits we just used.
14840 if (l >= (NumElts / 2))
14841 Index += NumElts; // Switch to other source.
14842 for (unsigned i = 0; i != NumLaneElts; ++i) {
14843 Indices[l + i] = Index + i;
14847 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14848 ArrayRef(Indices, NumElts), "shuf");
14851 case X86::BI__builtin_ia32_vperm2f128_pd256:
14852 case X86::BI__builtin_ia32_vperm2f128_ps256:
14853 case X86::BI__builtin_ia32_vperm2f128_si256:
14854 case X86::BI__builtin_ia32_permti256: {
14855 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14856 unsigned NumElts =
14857 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14859 // This takes a very simple approach since there are two lanes and a
14860 // shuffle can have two inputs, so we reserve the first input for the
14861 // first lane and the second input for the second lane. This may result
14862 // in duplicate sources, but the backend can deal with that.
14864 Value *OutOps[2];
14865 int Indices[8];
14866 for (unsigned l = 0; l != 2; ++l) {
14867 // Determine the source for this lane.
14868 if (Imm & (1 << ((l * 4) + 3)))
14869 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
14870 else if (Imm & (1 << ((l * 4) + 1)))
14871 OutOps[l] = Ops[1];
14872 else
14873 OutOps[l] = Ops[0];
14875 for (unsigned i = 0; i != NumElts/2; ++i) {
14876 // Start with ith element of the source for this lane.
14877 unsigned Idx = (l * NumElts) + i;
14878 // If bit 0 of the immediate half is set, switch to the high half of
14879 // the source.
14880 if (Imm & (1 << (l * 4)))
14881 Idx += NumElts/2;
14882 Indices[(l * (NumElts/2)) + i] = Idx;
14886 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
14887 ArrayRef(Indices, NumElts), "vperm");
14890 case X86::BI__builtin_ia32_pslldqi128_byteshift:
14891 case X86::BI__builtin_ia32_pslldqi256_byteshift:
14892 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
14893 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14894 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
14895 // The builtin type is vXi64, so multiply by 8 to get the byte count.
14896 unsigned NumElts = ResultType->getNumElements() * 8;
14898 // If pslldq is shifting the vector more than 15 bytes, emit zero.
14899 if (ShiftVal >= 16)
14900 return llvm::Constant::getNullValue(ResultType);
14902 int Indices[64];
14903 // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane separately.
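// Per 128-bit lane, result byte i is source byte i - ShiftVal when
// i >= ShiftVal and otherwise comes from the zero vector (the first shuffle
// operand), i.e. a left byte shift with zero fill.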
14904 for (unsigned l = 0; l != NumElts; l += 16) {
14905 for (unsigned i = 0; i != 16; ++i) {
14906 unsigned Idx = NumElts + i - ShiftVal;
14907 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
14908 Indices[l + i] = Idx + l;
14912 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
14913 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
14914 Value *Zero = llvm::Constant::getNullValue(VecTy);
14915 Value *SV = Builder.CreateShuffleVector(
14916 Zero, Cast, ArrayRef(Indices, NumElts), "pslldq");
14917 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
14919 case X86::BI__builtin_ia32_psrldqi128_byteshift:
14920 case X86::BI__builtin_ia32_psrldqi256_byteshift:
14921 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
14922 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14923 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
14924 // The builtin type is vXi64, so multiply by 8 to get the byte count.
14925 unsigned NumElts = ResultType->getNumElements() * 8;
14927 // If psrldq is shifting the vector more than 15 bytes, emit zero.
14928 if (ShiftVal >= 16)
14929 return llvm::Constant::getNullValue(ResultType);
14931 int Indices[64];
14932 // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane separately.
14933 for (unsigned l = 0; l != NumElts; l += 16) {
14934 for (unsigned i = 0; i != 16; ++i) {
14935 unsigned Idx = i + ShiftVal;
14936 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
14937 Indices[l + i] = Idx + l;
14941 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
14942 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
14943 Value *Zero = llvm::Constant::getNullValue(VecTy);
14944 Value *SV = Builder.CreateShuffleVector(
14945 Cast, Zero, ArrayRef(Indices, NumElts), "psrldq");
14946 return Builder.CreateBitCast(SV, ResultType, "cast");
14948 case X86::BI__builtin_ia32_kshiftliqi:
14949 case X86::BI__builtin_ia32_kshiftlihi:
14950 case X86::BI__builtin_ia32_kshiftlisi:
14951 case X86::BI__builtin_ia32_kshiftlidi: {
14952 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14953 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14955 if (ShiftVal >= NumElts)
14956 return llvm::Constant::getNullValue(Ops[0]->getType());
14958 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
14960 int Indices[64];
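// Element i of the result is In[i - ShiftVal] for i >= ShiftVal and comes from
// the zero vector otherwise, i.e. a left shift of the mask bits with zero fill.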
14961 for (unsigned i = 0; i != NumElts; ++i)
14962 Indices[i] = NumElts + i - ShiftVal;
14964 Value *Zero = llvm::Constant::getNullValue(In->getType());
14965 Value *SV = Builder.CreateShuffleVector(
14966 Zero, In, ArrayRef(Indices, NumElts), "kshiftl");
14967 return Builder.CreateBitCast(SV, Ops[0]->getType());
14969 case X86::BI__builtin_ia32_kshiftriqi:
14970 case X86::BI__builtin_ia32_kshiftrihi:
14971 case X86::BI__builtin_ia32_kshiftrisi:
14972 case X86::BI__builtin_ia32_kshiftridi: {
14973 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14974 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14976 if (ShiftVal >= NumElts)
14977 return llvm::Constant::getNullValue(Ops[0]->getType());
14979 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
14981 int Indices[64];
14982 for (unsigned i = 0; i != NumElts; ++i)
14983 Indices[i] = i + ShiftVal;
14985 Value *Zero = llvm::Constant::getNullValue(In->getType());
14986 Value *SV = Builder.CreateShuffleVector(
14987 In, Zero, ArrayRef(Indices, NumElts), "kshiftr");
14988 return Builder.CreateBitCast(SV, Ops[0]->getType());
14990 case X86::BI__builtin_ia32_movnti:
14991 case X86::BI__builtin_ia32_movnti64:
14992 case X86::BI__builtin_ia32_movntsd:
14993 case X86::BI__builtin_ia32_movntss: {
14994 llvm::MDNode *Node = llvm::MDNode::get(
14995 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
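// The i32 1 node built here is attached to the store below as !nontemporal
// metadata, marking it as a non-temporal store.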
14997 Value *Ptr = Ops[0];
14998 Value *Src = Ops[1];
15000 // Extract the 0th element of the source vector.
15001 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
15002 BuiltinID == X86::BI__builtin_ia32_movntss)
15003 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
15005 // Unaligned nontemporal store of the scalar value.
15006 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, Ptr);
15007 SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
15008 SI->setAlignment(llvm::Align(1));
15009 return SI;
15011 // Rotate is a special case of funnel shift: the first two args are the same.
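// i.e. rotl(x, s) == fshl(x, x, s) and rotr(x, s) == fshr(x, x, s).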
15012 case X86::BI__builtin_ia32_vprotb:
15013 case X86::BI__builtin_ia32_vprotw:
15014 case X86::BI__builtin_ia32_vprotd:
15015 case X86::BI__builtin_ia32_vprotq:
15016 case X86::BI__builtin_ia32_vprotbi:
15017 case X86::BI__builtin_ia32_vprotwi:
15018 case X86::BI__builtin_ia32_vprotdi:
15019 case X86::BI__builtin_ia32_vprotqi:
15020 case X86::BI__builtin_ia32_prold128:
15021 case X86::BI__builtin_ia32_prold256:
15022 case X86::BI__builtin_ia32_prold512:
15023 case X86::BI__builtin_ia32_prolq128:
15024 case X86::BI__builtin_ia32_prolq256:
15025 case X86::BI__builtin_ia32_prolq512:
15026 case X86::BI__builtin_ia32_prolvd128:
15027 case X86::BI__builtin_ia32_prolvd256:
15028 case X86::BI__builtin_ia32_prolvd512:
15029 case X86::BI__builtin_ia32_prolvq128:
15030 case X86::BI__builtin_ia32_prolvq256:
15031 case X86::BI__builtin_ia32_prolvq512:
15032 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
15033 case X86::BI__builtin_ia32_prord128:
15034 case X86::BI__builtin_ia32_prord256:
15035 case X86::BI__builtin_ia32_prord512:
15036 case X86::BI__builtin_ia32_prorq128:
15037 case X86::BI__builtin_ia32_prorq256:
15038 case X86::BI__builtin_ia32_prorq512:
15039 case X86::BI__builtin_ia32_prorvd128:
15040 case X86::BI__builtin_ia32_prorvd256:
15041 case X86::BI__builtin_ia32_prorvd512:
15042 case X86::BI__builtin_ia32_prorvq128:
15043 case X86::BI__builtin_ia32_prorvq256:
15044 case X86::BI__builtin_ia32_prorvq512:
15045 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
15046 case X86::BI__builtin_ia32_selectb_128:
15047 case X86::BI__builtin_ia32_selectb_256:
15048 case X86::BI__builtin_ia32_selectb_512:
15049 case X86::BI__builtin_ia32_selectw_128:
15050 case X86::BI__builtin_ia32_selectw_256:
15051 case X86::BI__builtin_ia32_selectw_512:
15052 case X86::BI__builtin_ia32_selectd_128:
15053 case X86::BI__builtin_ia32_selectd_256:
15054 case X86::BI__builtin_ia32_selectd_512:
15055 case X86::BI__builtin_ia32_selectq_128:
15056 case X86::BI__builtin_ia32_selectq_256:
15057 case X86::BI__builtin_ia32_selectq_512:
15058 case X86::BI__builtin_ia32_selectph_128:
15059 case X86::BI__builtin_ia32_selectph_256:
15060 case X86::BI__builtin_ia32_selectph_512:
15061 case X86::BI__builtin_ia32_selectpbf_128:
15062 case X86::BI__builtin_ia32_selectpbf_256:
15063 case X86::BI__builtin_ia32_selectpbf_512:
15064 case X86::BI__builtin_ia32_selectps_128:
15065 case X86::BI__builtin_ia32_selectps_256:
15066 case X86::BI__builtin_ia32_selectps_512:
15067 case X86::BI__builtin_ia32_selectpd_128:
15068 case X86::BI__builtin_ia32_selectpd_256:
15069 case X86::BI__builtin_ia32_selectpd_512:
15070 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
15071 case X86::BI__builtin_ia32_selectsh_128:
15072 case X86::BI__builtin_ia32_selectsbf_128:
15073 case X86::BI__builtin_ia32_selectss_128:
15074 case X86::BI__builtin_ia32_selectsd_128: {
15075 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
15076 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
15077 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
15078 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
15080 case X86::BI__builtin_ia32_cmpb128_mask:
15081 case X86::BI__builtin_ia32_cmpb256_mask:
15082 case X86::BI__builtin_ia32_cmpb512_mask:
15083 case X86::BI__builtin_ia32_cmpw128_mask:
15084 case X86::BI__builtin_ia32_cmpw256_mask:
15085 case X86::BI__builtin_ia32_cmpw512_mask:
15086 case X86::BI__builtin_ia32_cmpd128_mask:
15087 case X86::BI__builtin_ia32_cmpd256_mask:
15088 case X86::BI__builtin_ia32_cmpd512_mask:
15089 case X86::BI__builtin_ia32_cmpq128_mask:
15090 case X86::BI__builtin_ia32_cmpq256_mask:
15091 case X86::BI__builtin_ia32_cmpq512_mask: {
15092 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
15093 return EmitX86MaskedCompare(*this, CC, true, Ops);
15095 case X86::BI__builtin_ia32_ucmpb128_mask:
15096 case X86::BI__builtin_ia32_ucmpb256_mask:
15097 case X86::BI__builtin_ia32_ucmpb512_mask:
15098 case X86::BI__builtin_ia32_ucmpw128_mask:
15099 case X86::BI__builtin_ia32_ucmpw256_mask:
15100 case X86::BI__builtin_ia32_ucmpw512_mask:
15101 case X86::BI__builtin_ia32_ucmpd128_mask:
15102 case X86::BI__builtin_ia32_ucmpd256_mask:
15103 case X86::BI__builtin_ia32_ucmpd512_mask:
15104 case X86::BI__builtin_ia32_ucmpq128_mask:
15105 case X86::BI__builtin_ia32_ucmpq256_mask:
15106 case X86::BI__builtin_ia32_ucmpq512_mask: {
15107 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
15108 return EmitX86MaskedCompare(*this, CC, false, Ops);
15110 case X86::BI__builtin_ia32_vpcomb:
15111 case X86::BI__builtin_ia32_vpcomw:
15112 case X86::BI__builtin_ia32_vpcomd:
15113 case X86::BI__builtin_ia32_vpcomq:
15114 return EmitX86vpcom(*this, Ops, true);
15115 case X86::BI__builtin_ia32_vpcomub:
15116 case X86::BI__builtin_ia32_vpcomuw:
15117 case X86::BI__builtin_ia32_vpcomud:
15118 case X86::BI__builtin_ia32_vpcomuq:
15119 return EmitX86vpcom(*this, Ops, false);
15121 case X86::BI__builtin_ia32_kortestcqi:
15122 case X86::BI__builtin_ia32_kortestchi:
15123 case X86::BI__builtin_ia32_kortestcsi:
15124 case X86::BI__builtin_ia32_kortestcdi: {
15125 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
15126 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
15127 Value *Cmp = Builder.CreateICmpEQ(Or, C);
15128 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
15130 case X86::BI__builtin_ia32_kortestzqi:
15131 case X86::BI__builtin_ia32_kortestzhi:
15132 case X86::BI__builtin_ia32_kortestzsi:
15133 case X86::BI__builtin_ia32_kortestzdi: {
15134 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
15135 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
15136 Value *Cmp = Builder.CreateICmpEQ(Or, C);
15137 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
15140 case X86::BI__builtin_ia32_ktestcqi:
15141 case X86::BI__builtin_ia32_ktestzqi:
15142 case X86::BI__builtin_ia32_ktestchi:
15143 case X86::BI__builtin_ia32_ktestzhi:
15144 case X86::BI__builtin_ia32_ktestcsi:
15145 case X86::BI__builtin_ia32_ktestzsi:
15146 case X86::BI__builtin_ia32_ktestcdi:
15147 case X86::BI__builtin_ia32_ktestzdi: {
15148 Intrinsic::ID IID;
15149 switch (BuiltinID) {
15150 default: llvm_unreachable("Unsupported intrinsic!");
15151 case X86::BI__builtin_ia32_ktestcqi:
15152 IID = Intrinsic::x86_avx512_ktestc_b;
15153 break;
15154 case X86::BI__builtin_ia32_ktestzqi:
15155 IID = Intrinsic::x86_avx512_ktestz_b;
15156 break;
15157 case X86::BI__builtin_ia32_ktestchi:
15158 IID = Intrinsic::x86_avx512_ktestc_w;
15159 break;
15160 case X86::BI__builtin_ia32_ktestzhi:
15161 IID = Intrinsic::x86_avx512_ktestz_w;
15162 break;
15163 case X86::BI__builtin_ia32_ktestcsi:
15164 IID = Intrinsic::x86_avx512_ktestc_d;
15165 break;
15166 case X86::BI__builtin_ia32_ktestzsi:
15167 IID = Intrinsic::x86_avx512_ktestz_d;
15168 break;
15169 case X86::BI__builtin_ia32_ktestcdi:
15170 IID = Intrinsic::x86_avx512_ktestc_q;
15171 break;
15172 case X86::BI__builtin_ia32_ktestzdi:
15173 IID = Intrinsic::x86_avx512_ktestz_q;
15174 break;
15177 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15178 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15179 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15180 Function *Intr = CGM.getIntrinsic(IID);
15181 return Builder.CreateCall(Intr, {LHS, RHS});
15184 case X86::BI__builtin_ia32_kaddqi:
15185 case X86::BI__builtin_ia32_kaddhi:
15186 case X86::BI__builtin_ia32_kaddsi:
15187 case X86::BI__builtin_ia32_kadddi: {
15188 Intrinsic::ID IID;
15189 switch (BuiltinID) {
15190 default: llvm_unreachable("Unsupported intrinsic!");
15191 case X86::BI__builtin_ia32_kaddqi:
15192 IID = Intrinsic::x86_avx512_kadd_b;
15193 break;
15194 case X86::BI__builtin_ia32_kaddhi:
15195 IID = Intrinsic::x86_avx512_kadd_w;
15196 break;
15197 case X86::BI__builtin_ia32_kaddsi:
15198 IID = Intrinsic::x86_avx512_kadd_d;
15199 break;
15200 case X86::BI__builtin_ia32_kadddi:
15201 IID = Intrinsic::x86_avx512_kadd_q;
15202 break;
15205 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15206 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15207 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15208 Function *Intr = CGM.getIntrinsic(IID);
15209 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
15210 return Builder.CreateBitCast(Res, Ops[0]->getType());
15212 case X86::BI__builtin_ia32_kandqi:
15213 case X86::BI__builtin_ia32_kandhi:
15214 case X86::BI__builtin_ia32_kandsi:
15215 case X86::BI__builtin_ia32_kanddi:
15216 return EmitX86MaskLogic(*this, Instruction::And, Ops);
15217 case X86::BI__builtin_ia32_kandnqi:
15218 case X86::BI__builtin_ia32_kandnhi:
15219 case X86::BI__builtin_ia32_kandnsi:
15220 case X86::BI__builtin_ia32_kandndi:
15221 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
15222 case X86::BI__builtin_ia32_korqi:
15223 case X86::BI__builtin_ia32_korhi:
15224 case X86::BI__builtin_ia32_korsi:
15225 case X86::BI__builtin_ia32_kordi:
15226 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
15227 case X86::BI__builtin_ia32_kxnorqi:
15228 case X86::BI__builtin_ia32_kxnorhi:
15229 case X86::BI__builtin_ia32_kxnorsi:
15230 case X86::BI__builtin_ia32_kxnordi:
15231 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
15232 case X86::BI__builtin_ia32_kxorqi:
15233 case X86::BI__builtin_ia32_kxorhi:
15234 case X86::BI__builtin_ia32_kxorsi:
15235 case X86::BI__builtin_ia32_kxordi:
15236 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
15237 case X86::BI__builtin_ia32_knotqi:
15238 case X86::BI__builtin_ia32_knothi:
15239 case X86::BI__builtin_ia32_knotsi:
15240 case X86::BI__builtin_ia32_knotdi: {
15241 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15242 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
15243 return Builder.CreateBitCast(Builder.CreateNot(Res),
15244 Ops[0]->getType());
15246 case X86::BI__builtin_ia32_kmovb:
15247 case X86::BI__builtin_ia32_kmovw:
15248 case X86::BI__builtin_ia32_kmovd:
15249 case X86::BI__builtin_ia32_kmovq: {
15250 // Bitcast to vXi1 type and then back to integer. This gets the mask
15251 // register type into the IR, but might be optimized out depending on
15252 // what's around it.
15253 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15254 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
15255 return Builder.CreateBitCast(Res, Ops[0]->getType());
15258 case X86::BI__builtin_ia32_kunpckdi:
15259 case X86::BI__builtin_ia32_kunpcksi:
15260 case X86::BI__builtin_ia32_kunpckhi: {
15261 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
15262 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
15263 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
15264 int Indices[64];
15265 for (unsigned i = 0; i != NumElts; ++i)
15266 Indices[i] = i;
15268 // First extract half of each vector. This gives better codegen than
15269 // doing it in a single shuffle.
15270 LHS = Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
15271 RHS = Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
15272 // Concat the vectors.
15273 // NOTE: Operands are swapped to match the intrinsic definition.
15274 Value *Res =
15275 Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
15276 return Builder.CreateBitCast(Res, Ops[0]->getType());
15279 case X86::BI__builtin_ia32_vplzcntd_128:
15280 case X86::BI__builtin_ia32_vplzcntd_256:
15281 case X86::BI__builtin_ia32_vplzcntd_512:
15282 case X86::BI__builtin_ia32_vplzcntq_128:
15283 case X86::BI__builtin_ia32_vplzcntq_256:
15284 case X86::BI__builtin_ia32_vplzcntq_512: {
15285 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
15286 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
15288 case X86::BI__builtin_ia32_sqrtss:
15289 case X86::BI__builtin_ia32_sqrtsd: {
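// Scalar sqrt: operate on element 0 only (via the constrained intrinsic under
// strict FP) and reinsert it, leaving the upper elements of Ops[0] unchanged.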
15290 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
15291 Function *F;
15292 if (Builder.getIsFPConstrained()) {
15293 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15294 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
15295 A->getType());
15296 A = Builder.CreateConstrainedFPCall(F, {A});
15297 } else {
15298 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
15299 A = Builder.CreateCall(F, {A});
15301 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
15303 case X86::BI__builtin_ia32_sqrtsh_round_mask:
15304 case X86::BI__builtin_ia32_sqrtsd_round_mask:
15305 case X86::BI__builtin_ia32_sqrtss_round_mask: {
15306 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
15307 // Only lower to generic IR if the rounding mode is 4 (i.e. CUR_DIRECTION);
15308 // otherwise keep the target-specific intrinsic.
15309 if (CC != 4) {
15310 Intrinsic::ID IID;
15312 switch (BuiltinID) {
15313 default:
15314 llvm_unreachable("Unsupported intrinsic!");
15315 case X86::BI__builtin_ia32_sqrtsh_round_mask:
15316 IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
15317 break;
15318 case X86::BI__builtin_ia32_sqrtsd_round_mask:
15319 IID = Intrinsic::x86_avx512_mask_sqrt_sd;
15320 break;
15321 case X86::BI__builtin_ia32_sqrtss_round_mask:
15322 IID = Intrinsic::x86_avx512_mask_sqrt_ss;
15323 break;
15325 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15327 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
15328 Function *F;
15329 if (Builder.getIsFPConstrained()) {
15330 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15331 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
15332 A->getType());
15333 A = Builder.CreateConstrainedFPCall(F, A);
15334 } else {
15335 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
15336 A = Builder.CreateCall(F, A);
15338 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
15339 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
15340 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
15342 case X86::BI__builtin_ia32_sqrtpd256:
15343 case X86::BI__builtin_ia32_sqrtpd:
15344 case X86::BI__builtin_ia32_sqrtps256:
15345 case X86::BI__builtin_ia32_sqrtps:
15346 case X86::BI__builtin_ia32_sqrtph256:
15347 case X86::BI__builtin_ia32_sqrtph:
15348 case X86::BI__builtin_ia32_sqrtph512:
15349 case X86::BI__builtin_ia32_sqrtps512:
15350 case X86::BI__builtin_ia32_sqrtpd512: {
15351 if (Ops.size() == 2) {
15352 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
15353 // Only lower to generic IR if the rounding mode is 4 (i.e. CUR_DIRECTION);
15354 // otherwise keep the target-specific intrinsic.
15355 if (CC != 4) {
15356 Intrinsic::ID IID;
15358 switch (BuiltinID) {
15359 default:
15360 llvm_unreachable("Unsupported intrinsic!");
15361 case X86::BI__builtin_ia32_sqrtph512:
15362 IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
15363 break;
15364 case X86::BI__builtin_ia32_sqrtps512:
15365 IID = Intrinsic::x86_avx512_sqrt_ps_512;
15366 break;
15367 case X86::BI__builtin_ia32_sqrtpd512:
15368 IID = Intrinsic::x86_avx512_sqrt_pd_512;
15369 break;
15371 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15374 if (Builder.getIsFPConstrained()) {
15375 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15376 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
15377 Ops[0]->getType());
15378 return Builder.CreateConstrainedFPCall(F, Ops[0]);
15379 } else {
15380 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
15381 return Builder.CreateCall(F, Ops[0]);
15385 case X86::BI__builtin_ia32_pmuludq128:
15386 case X86::BI__builtin_ia32_pmuludq256:
15387 case X86::BI__builtin_ia32_pmuludq512:
15388 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
15390 case X86::BI__builtin_ia32_pmuldq128:
15391 case X86::BI__builtin_ia32_pmuldq256:
15392 case X86::BI__builtin_ia32_pmuldq512:
15393 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
15395 case X86::BI__builtin_ia32_pternlogd512_mask:
15396 case X86::BI__builtin_ia32_pternlogq512_mask:
15397 case X86::BI__builtin_ia32_pternlogd128_mask:
15398 case X86::BI__builtin_ia32_pternlogd256_mask:
15399 case X86::BI__builtin_ia32_pternlogq128_mask:
15400 case X86::BI__builtin_ia32_pternlogq256_mask:
15401 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
15403 case X86::BI__builtin_ia32_pternlogd512_maskz:
15404 case X86::BI__builtin_ia32_pternlogq512_maskz:
15405 case X86::BI__builtin_ia32_pternlogd128_maskz:
15406 case X86::BI__builtin_ia32_pternlogd256_maskz:
15407 case X86::BI__builtin_ia32_pternlogq128_maskz:
15408 case X86::BI__builtin_ia32_pternlogq256_maskz:
15409 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
15411 case X86::BI__builtin_ia32_vpshldd128:
15412 case X86::BI__builtin_ia32_vpshldd256:
15413 case X86::BI__builtin_ia32_vpshldd512:
15414 case X86::BI__builtin_ia32_vpshldq128:
15415 case X86::BI__builtin_ia32_vpshldq256:
15416 case X86::BI__builtin_ia32_vpshldq512:
15417 case X86::BI__builtin_ia32_vpshldw128:
15418 case X86::BI__builtin_ia32_vpshldw256:
15419 case X86::BI__builtin_ia32_vpshldw512:
15420 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
15422 case X86::BI__builtin_ia32_vpshrdd128:
15423 case X86::BI__builtin_ia32_vpshrdd256:
15424 case X86::BI__builtin_ia32_vpshrdd512:
15425 case X86::BI__builtin_ia32_vpshrdq128:
15426 case X86::BI__builtin_ia32_vpshrdq256:
15427 case X86::BI__builtin_ia32_vpshrdq512:
15428 case X86::BI__builtin_ia32_vpshrdw128:
15429 case X86::BI__builtin_ia32_vpshrdw256:
15430 case X86::BI__builtin_ia32_vpshrdw512:
15431 // Ops 0 and 1 are swapped.
15432 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
15434 case X86::BI__builtin_ia32_vpshldvd128:
15435 case X86::BI__builtin_ia32_vpshldvd256:
15436 case X86::BI__builtin_ia32_vpshldvd512:
15437 case X86::BI__builtin_ia32_vpshldvq128:
15438 case X86::BI__builtin_ia32_vpshldvq256:
15439 case X86::BI__builtin_ia32_vpshldvq512:
15440 case X86::BI__builtin_ia32_vpshldvw128:
15441 case X86::BI__builtin_ia32_vpshldvw256:
15442 case X86::BI__builtin_ia32_vpshldvw512:
15443 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
15445 case X86::BI__builtin_ia32_vpshrdvd128:
15446 case X86::BI__builtin_ia32_vpshrdvd256:
15447 case X86::BI__builtin_ia32_vpshrdvd512:
15448 case X86::BI__builtin_ia32_vpshrdvq128:
15449 case X86::BI__builtin_ia32_vpshrdvq256:
15450 case X86::BI__builtin_ia32_vpshrdvq512:
15451 case X86::BI__builtin_ia32_vpshrdvw128:
15452 case X86::BI__builtin_ia32_vpshrdvw256:
15453 case X86::BI__builtin_ia32_vpshrdvw512:
15454 // Ops 0 and 1 are swapped.
15455 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
15457 // Reductions
15458 case X86::BI__builtin_ia32_reduce_fadd_pd512:
15459 case X86::BI__builtin_ia32_reduce_fadd_ps512:
15460 case X86::BI__builtin_ia32_reduce_fadd_ph512:
15461 case X86::BI__builtin_ia32_reduce_fadd_ph256:
15462 case X86::BI__builtin_ia32_reduce_fadd_ph128: {
15463 Function *F =
15464 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
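// Allow reassociation for this call only; the FMF guard restores the previous
// fast-math flags when it goes out of scope.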
15465 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15466 Builder.getFastMathFlags().setAllowReassoc();
15467 return Builder.CreateCall(F, {Ops[0], Ops[1]});
15469 case X86::BI__builtin_ia32_reduce_fmul_pd512:
15470 case X86::BI__builtin_ia32_reduce_fmul_ps512:
15471 case X86::BI__builtin_ia32_reduce_fmul_ph512:
15472 case X86::BI__builtin_ia32_reduce_fmul_ph256:
15473 case X86::BI__builtin_ia32_reduce_fmul_ph128: {
15474 Function *F =
15475 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
15476 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15477 Builder.getFastMathFlags().setAllowReassoc();
15478 return Builder.CreateCall(F, {Ops[0], Ops[1]});
15480 case X86::BI__builtin_ia32_reduce_fmax_pd512:
15481 case X86::BI__builtin_ia32_reduce_fmax_ps512:
15482 case X86::BI__builtin_ia32_reduce_fmax_ph512:
15483 case X86::BI__builtin_ia32_reduce_fmax_ph256:
15484 case X86::BI__builtin_ia32_reduce_fmax_ph128: {
15485 Function *F =
15486 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
15487 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15488 Builder.getFastMathFlags().setNoNaNs();
15489 return Builder.CreateCall(F, {Ops[0]});
15491 case X86::BI__builtin_ia32_reduce_fmin_pd512:
15492 case X86::BI__builtin_ia32_reduce_fmin_ps512:
15493 case X86::BI__builtin_ia32_reduce_fmin_ph512:
15494 case X86::BI__builtin_ia32_reduce_fmin_ph256:
15495 case X86::BI__builtin_ia32_reduce_fmin_ph128: {
15496 Function *F =
15497 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
15498 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15499 Builder.getFastMathFlags().setNoNaNs();
15500 return Builder.CreateCall(F, {Ops[0]});
15503 // 3DNow!
15504 case X86::BI__builtin_ia32_pswapdsf:
15505 case X86::BI__builtin_ia32_pswapdsi: {
15506 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
15507 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
15508 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
15509 return Builder.CreateCall(F, Ops, "pswapd");
15511 case X86::BI__builtin_ia32_rdrand16_step:
15512 case X86::BI__builtin_ia32_rdrand32_step:
15513 case X86::BI__builtin_ia32_rdrand64_step:
15514 case X86::BI__builtin_ia32_rdseed16_step:
15515 case X86::BI__builtin_ia32_rdseed32_step:
15516 case X86::BI__builtin_ia32_rdseed64_step: {
15517 Intrinsic::ID ID;
15518 switch (BuiltinID) {
15519 default: llvm_unreachable("Unsupported intrinsic!");
15520 case X86::BI__builtin_ia32_rdrand16_step:
15521 ID = Intrinsic::x86_rdrand_16;
15522 break;
15523 case X86::BI__builtin_ia32_rdrand32_step:
15524 ID = Intrinsic::x86_rdrand_32;
15525 break;
15526 case X86::BI__builtin_ia32_rdrand64_step:
15527 ID = Intrinsic::x86_rdrand_64;
15528 break;
15529 case X86::BI__builtin_ia32_rdseed16_step:
15530 ID = Intrinsic::x86_rdseed_16;
15531 break;
15532 case X86::BI__builtin_ia32_rdseed32_step:
15533 ID = Intrinsic::x86_rdseed_32;
15534 break;
15535 case X86::BI__builtin_ia32_rdseed64_step:
15536 ID = Intrinsic::x86_rdseed_64;
15537 break;
15540 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
15541 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
15542 Ops[0]);
15543 return Builder.CreateExtractValue(Call, 1);
15545 case X86::BI__builtin_ia32_addcarryx_u32:
15546 case X86::BI__builtin_ia32_addcarryx_u64:
15547 case X86::BI__builtin_ia32_subborrow_u32:
15548 case X86::BI__builtin_ia32_subborrow_u64: {
15549 Intrinsic::ID IID;
15550 switch (BuiltinID) {
15551 default: llvm_unreachable("Unsupported intrinsic!");
15552 case X86::BI__builtin_ia32_addcarryx_u32:
15553 IID = Intrinsic::x86_addcarry_32;
15554 break;
15555 case X86::BI__builtin_ia32_addcarryx_u64:
15556 IID = Intrinsic::x86_addcarry_64;
15557 break;
15558 case X86::BI__builtin_ia32_subborrow_u32:
15559 IID = Intrinsic::x86_subborrow_32;
15560 break;
15561 case X86::BI__builtin_ia32_subborrow_u64:
15562 IID = Intrinsic::x86_subborrow_64;
15563 break;
15566 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
15567 { Ops[0], Ops[1], Ops[2] });
15568 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
15569 Ops[3]);
15570 return Builder.CreateExtractValue(Call, 0);
15573 case X86::BI__builtin_ia32_fpclassps128_mask:
15574 case X86::BI__builtin_ia32_fpclassps256_mask:
15575 case X86::BI__builtin_ia32_fpclassps512_mask:
15576 case X86::BI__builtin_ia32_fpclassph128_mask:
15577 case X86::BI__builtin_ia32_fpclassph256_mask:
15578 case X86::BI__builtin_ia32_fpclassph512_mask:
15579 case X86::BI__builtin_ia32_fpclasspd128_mask:
15580 case X86::BI__builtin_ia32_fpclasspd256_mask:
15581 case X86::BI__builtin_ia32_fpclasspd512_mask: {
15582 unsigned NumElts =
15583 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15584 Value *MaskIn = Ops[2];
15585 Ops.erase(&Ops[2]);
15587 Intrinsic::ID ID;
15588 switch (BuiltinID) {
15589 default: llvm_unreachable("Unsupported intrinsic!");
15590 case X86::BI__builtin_ia32_fpclassph128_mask:
15591 ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
15592 break;
15593 case X86::BI__builtin_ia32_fpclassph256_mask:
15594 ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
15595 break;
15596 case X86::BI__builtin_ia32_fpclassph512_mask:
15597 ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
15598 break;
15599 case X86::BI__builtin_ia32_fpclassps128_mask:
15600 ID = Intrinsic::x86_avx512_fpclass_ps_128;
15601 break;
15602 case X86::BI__builtin_ia32_fpclassps256_mask:
15603 ID = Intrinsic::x86_avx512_fpclass_ps_256;
15604 break;
15605 case X86::BI__builtin_ia32_fpclassps512_mask:
15606 ID = Intrinsic::x86_avx512_fpclass_ps_512;
15607 break;
15608 case X86::BI__builtin_ia32_fpclasspd128_mask:
15609 ID = Intrinsic::x86_avx512_fpclass_pd_128;
15610 break;
15611 case X86::BI__builtin_ia32_fpclasspd256_mask:
15612 ID = Intrinsic::x86_avx512_fpclass_pd_256;
15613 break;
15614 case X86::BI__builtin_ia32_fpclasspd512_mask:
15615 ID = Intrinsic::x86_avx512_fpclass_pd_512;
15616 break;
15619 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15620 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
15623 case X86::BI__builtin_ia32_vp2intersect_q_512:
15624 case X86::BI__builtin_ia32_vp2intersect_q_256:
15625 case X86::BI__builtin_ia32_vp2intersect_q_128:
15626 case X86::BI__builtin_ia32_vp2intersect_d_512:
15627 case X86::BI__builtin_ia32_vp2intersect_d_256:
15628 case X86::BI__builtin_ia32_vp2intersect_d_128: {
15629 unsigned NumElts =
15630 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15631 Intrinsic::ID ID;
15633 switch (BuiltinID) {
15634 default: llvm_unreachable("Unsupported intrinsic!");
15635 case X86::BI__builtin_ia32_vp2intersect_q_512:
15636 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
15637 break;
15638 case X86::BI__builtin_ia32_vp2intersect_q_256:
15639 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
15640 break;
15641 case X86::BI__builtin_ia32_vp2intersect_q_128:
15642 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
15643 break;
15644 case X86::BI__builtin_ia32_vp2intersect_d_512:
15645 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
15646 break;
15647 case X86::BI__builtin_ia32_vp2intersect_d_256:
15648 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
15649 break;
15650 case X86::BI__builtin_ia32_vp2intersect_d_128:
15651 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
15652 break;
15655 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
15656 Value *Result = Builder.CreateExtractValue(Call, 0);
15657 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
15658 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
15660 Result = Builder.CreateExtractValue(Call, 1);
15661 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
15662 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
15665 case X86::BI__builtin_ia32_vpmultishiftqb128:
15666 case X86::BI__builtin_ia32_vpmultishiftqb256:
15667 case X86::BI__builtin_ia32_vpmultishiftqb512: {
15668 Intrinsic::ID ID;
15669 switch (BuiltinID) {
15670 default: llvm_unreachable("Unsupported intrinsic!");
15671 case X86::BI__builtin_ia32_vpmultishiftqb128:
15672 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
15673 break;
15674 case X86::BI__builtin_ia32_vpmultishiftqb256:
15675 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
15676 break;
15677 case X86::BI__builtin_ia32_vpmultishiftqb512:
15678 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
15679 break;
15682 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15685 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
15686 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
15687 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
15688 unsigned NumElts =
15689 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15690 Value *MaskIn = Ops[2];
15691 Ops.erase(&Ops[2]);
15693 Intrinsic::ID ID;
15694 switch (BuiltinID) {
15695 default: llvm_unreachable("Unsupported intrinsic!");
15696 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
15697 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
15698 break;
15699 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
15700 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
15701 break;
15702 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
15703 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
15704 break;
15707 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15708 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
15711 // packed comparison intrinsics
15712 case X86::BI__builtin_ia32_cmpeqps:
15713 case X86::BI__builtin_ia32_cmpeqpd:
15714 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
15715 case X86::BI__builtin_ia32_cmpltps:
15716 case X86::BI__builtin_ia32_cmpltpd:
15717 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
15718 case X86::BI__builtin_ia32_cmpleps:
15719 case X86::BI__builtin_ia32_cmplepd:
15720 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
15721 case X86::BI__builtin_ia32_cmpunordps:
15722 case X86::BI__builtin_ia32_cmpunordpd:
15723 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
15724 case X86::BI__builtin_ia32_cmpneqps:
15725 case X86::BI__builtin_ia32_cmpneqpd:
15726 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
15727 case X86::BI__builtin_ia32_cmpnltps:
15728 case X86::BI__builtin_ia32_cmpnltpd:
15729 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
15730 case X86::BI__builtin_ia32_cmpnleps:
15731 case X86::BI__builtin_ia32_cmpnlepd:
15732 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
15733 case X86::BI__builtin_ia32_cmpordps:
15734 case X86::BI__builtin_ia32_cmpordpd:
15735 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
15736 case X86::BI__builtin_ia32_cmpph128_mask:
15737 case X86::BI__builtin_ia32_cmpph256_mask:
15738 case X86::BI__builtin_ia32_cmpph512_mask:
15739 case X86::BI__builtin_ia32_cmpps128_mask:
15740 case X86::BI__builtin_ia32_cmpps256_mask:
15741 case X86::BI__builtin_ia32_cmpps512_mask:
15742 case X86::BI__builtin_ia32_cmppd128_mask:
15743 case X86::BI__builtin_ia32_cmppd256_mask:
15744 case X86::BI__builtin_ia32_cmppd512_mask:
15745 IsMaskFCmp = true;
15746 [[fallthrough]];
15747 case X86::BI__builtin_ia32_cmpps:
15748 case X86::BI__builtin_ia32_cmpps256:
15749 case X86::BI__builtin_ia32_cmppd:
15750 case X86::BI__builtin_ia32_cmppd256: {
15751 // Lower vector comparisons to fcmp instructions, ignoring the requested
15752 // signalling behaviour and the requested rounding mode.
15753 // This is only possible if the fp-model is not strict and FENV_ACCESS is
15754 // off.
15756 // The third argument is the comparison condition, an integer in the
15757 // range [0, 31].
15758 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
15760 // Lower to an IR fcmp instruction, mapping the condition code to a predicate.
15761 // Both the _OS and _OQ forms map to the same predicate, e.g. _CMP_GT_OS and
15762 // _CMP_GT_OQ both become FCMP_OGT; the signalling behaviour is tracked in IsSignaling.
15763 FCmpInst::Predicate Pred;
15764 bool IsSignaling;
15765 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
15766 // behavior is inverted. We'll handle that after the switch.
15767 switch (CC & 0xf) {
15768 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
15769 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
15770 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
15771 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
15772 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
15773 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
15774 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
15775 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
15776 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
15777 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
15778 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
15779 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
15780 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
15781 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
15782 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
15783 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
15784 default: llvm_unreachable("Unhandled CC");
15787 // Invert the signalling behavior for 16-31.
15788 if (CC & 0x10)
15789 IsSignaling = !IsSignaling;
15791 // If the predicate is true or false and we're using constrained intrinsics,
15792 // we don't have a compare intrinsic we can use. Just use the legacy X86
15793 // specific intrinsic.
15794 // If the intrinsic is mask enabled and we're using constrained intrinsics,
15795 // use the legacy X86 specific intrinsic.
15796 if (Builder.getIsFPConstrained() &&
15797 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
15798 IsMaskFCmp)) {
15800 Intrinsic::ID IID;
15801 switch (BuiltinID) {
15802 default: llvm_unreachable("Unexpected builtin");
15803 case X86::BI__builtin_ia32_cmpps:
15804 IID = Intrinsic::x86_sse_cmp_ps;
15805 break;
15806 case X86::BI__builtin_ia32_cmpps256:
15807 IID = Intrinsic::x86_avx_cmp_ps_256;
15808 break;
15809 case X86::BI__builtin_ia32_cmppd:
15810 IID = Intrinsic::x86_sse2_cmp_pd;
15811 break;
15812 case X86::BI__builtin_ia32_cmppd256:
15813 IID = Intrinsic::x86_avx_cmp_pd_256;
15814 break;
15815 case X86::BI__builtin_ia32_cmpph128_mask:
15816 IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_128;
15817 break;
15818 case X86::BI__builtin_ia32_cmpph256_mask:
15819 IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_256;
15820 break;
15821 case X86::BI__builtin_ia32_cmpph512_mask:
15822 IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_512;
15823 break;
15824 case X86::BI__builtin_ia32_cmpps512_mask:
15825 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
15826 break;
15827 case X86::BI__builtin_ia32_cmppd512_mask:
15828 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
15829 break;
15830 case X86::BI__builtin_ia32_cmpps128_mask:
15831 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
15832 break;
15833 case X86::BI__builtin_ia32_cmpps256_mask:
15834 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
15835 break;
15836 case X86::BI__builtin_ia32_cmppd128_mask:
15837 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
15838 break;
15839 case X86::BI__builtin_ia32_cmppd256_mask:
15840 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
15841 break;
15844 Function *Intr = CGM.getIntrinsic(IID);
15845 if (IsMaskFCmp) {
15846 unsigned NumElts =
15847 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15848 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
15849 Value *Cmp = Builder.CreateCall(Intr, Ops);
15850 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
15853 return Builder.CreateCall(Intr, Ops);
15856 // Builtins without the _mask suffix return a vector of integers
15857 // of the same width as the input vectors
15858 if (IsMaskFCmp) {
15859 // We ignore SAE if strict FP is disabled. We only keep precise
15860 // exception behavior under strict FP.
15861 // NOTE: If strict FP ever does go through here, a CGFPOptionsRAII
15862 // object will be required.
15863 unsigned NumElts =
15864 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15865 Value *Cmp;
15866 if (IsSignaling)
15867 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
15868 else
15869 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
15870 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
15873 return getVectorFCmpIR(Pred, IsSignaling);
15876 // SSE scalar comparison intrinsics
15877 case X86::BI__builtin_ia32_cmpeqss:
15878 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
15879 case X86::BI__builtin_ia32_cmpltss:
15880 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
15881 case X86::BI__builtin_ia32_cmpless:
15882 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
15883 case X86::BI__builtin_ia32_cmpunordss:
15884 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
15885 case X86::BI__builtin_ia32_cmpneqss:
15886 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
15887 case X86::BI__builtin_ia32_cmpnltss:
15888 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
15889 case X86::BI__builtin_ia32_cmpnless:
15890 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
15891 case X86::BI__builtin_ia32_cmpordss:
15892 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
15893 case X86::BI__builtin_ia32_cmpeqsd:
15894 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
15895 case X86::BI__builtin_ia32_cmpltsd:
15896 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
15897 case X86::BI__builtin_ia32_cmplesd:
15898 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
15899 case X86::BI__builtin_ia32_cmpunordsd:
15900 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
15901 case X86::BI__builtin_ia32_cmpneqsd:
15902 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
15903 case X86::BI__builtin_ia32_cmpnltsd:
15904 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
15905 case X86::BI__builtin_ia32_cmpnlesd:
15906 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
15907 case X86::BI__builtin_ia32_cmpordsd:
15908 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
15910 // f16c half2float intrinsics
15911 case X86::BI__builtin_ia32_vcvtph2ps:
15912 case X86::BI__builtin_ia32_vcvtph2ps256:
15913 case X86::BI__builtin_ia32_vcvtph2ps_mask:
15914 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
15915 case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
15916 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15917 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
15920 // AVX512 bf16 intrinsics
15921 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
15922 Ops[2] = getMaskVecValue(
15923 *this, Ops[2],
15924 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
15925 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
15926 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15928 case X86::BI__builtin_ia32_cvtsbf162ss_32:
15929 return Builder.CreateFPExt(Ops[0], Builder.getFloatTy());
15931 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
15932 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
15933 Intrinsic::ID IID;
15934 switch (BuiltinID) {
15935 default: llvm_unreachable("Unsupported intrinsic!");
15936 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
15937 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
15938 break;
15939 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
15940 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
15941 break;
15943 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
15944 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
15947 case X86::BI__cpuid:
15948 case X86::BI__cpuidex: {
15949 Value *FuncId = EmitScalarExpr(E->getArg(1));
15950 Value *SubFuncId = BuiltinID == X86::BI__cpuidex
15951 ? EmitScalarExpr(E->getArg(2))
15952 : llvm::ConstantInt::get(Int32Ty, 0);
15954 llvm::StructType *CpuidRetTy =
15955 llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, Int32Ty);
15956 llvm::FunctionType *FTy =
15957 llvm::FunctionType::get(CpuidRetTy, {Int32Ty, Int32Ty}, false);
15959 StringRef Asm, Constraints;
15960 if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
15961 Asm = "cpuid";
15962 Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}";
15963 } else {
15964 // x86-64 uses %rbx as the base register, so preserve it.
15965 Asm = "xchgq %rbx, ${1:q}\n"
15966 "cpuid\n"
15967 "xchgq %rbx, ${1:q}";
15968 Constraints = "={ax},=r,={cx},={dx},0,2";
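// Outputs: eax, the scratch register that ends up holding cpuid's ebx value,
// ecx, and edx; the two inputs are tied to outputs 0 (eax) and 2 (ecx).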
15971 llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, Asm, Constraints,
15972 /*hasSideEffects=*/false);
15973 Value *IACall = Builder.CreateCall(IA, {FuncId, SubFuncId});
15974 Value *BasePtr = EmitScalarExpr(E->getArg(0));
15975 Value *Store = nullptr;
15976 for (unsigned i = 0; i < 4; i++) {
15977 Value *Extracted = Builder.CreateExtractValue(IACall, i);
15978 Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Int32Ty, BasePtr, i);
15979 Store = Builder.CreateAlignedStore(Extracted, StorePtr, getIntAlign());
15982 // Return the last store instruction to signal that we have emitted the
15983 // intrinsic.
15984 return Store;
15987 case X86::BI__emul:
15988 case X86::BI__emulu: {
15989 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
15990 bool isSigned = (BuiltinID == X86::BI__emul);
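// Widen both 32-bit operands to 64 bits using the builtin's signedness; the
// nuw/nsw flags on the multiply below match that signedness.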
15991 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
15992 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
15993 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
15995 case X86::BI__mulh:
15996 case X86::BI__umulh:
15997 case X86::BI_mul128:
15998 case X86::BI_umul128: {
15999 llvm::Type *ResType = ConvertType(E->getType());
16000 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
16002 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
16003 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
16004 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
16006 Value *MulResult, *HigherBits;
16007 if (IsSigned) {
16008 MulResult = Builder.CreateNSWMul(LHS, RHS);
16009 HigherBits = Builder.CreateAShr(MulResult, 64);
16010 } else {
16011 MulResult = Builder.CreateNUWMul(LHS, RHS);
16012 HigherBits = Builder.CreateLShr(MulResult, 64);
16014 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
16016 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
16017 return HigherBits;
16019 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
16020 Builder.CreateStore(HigherBits, HighBitsAddress);
16021 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
16024 case X86::BI__faststorefence: {
16025 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
16026 llvm::SyncScope::System);
16028 case X86::BI__shiftleft128:
16029 case X86::BI__shiftright128: {
16030 llvm::Function *F = CGM.getIntrinsic(
16031 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
16032 Int64Ty);
16033 // Flip low/high ops and zero-extend amount to matching type.
16034 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
16035 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
16036 std::swap(Ops[0], Ops[1]);
16037 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
16038 return Builder.CreateCall(F, Ops);
16040 case X86::BI_ReadWriteBarrier:
16041 case X86::BI_ReadBarrier:
16042 case X86::BI_WriteBarrier: {
16043 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
16044 llvm::SyncScope::SingleThread);
16047 case X86::BI_AddressOfReturnAddress: {
16048 Function *F =
16049 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
16050 return Builder.CreateCall(F);
16052 case X86::BI__stosb: {
16053 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
16054 // instruction, but it will create a memset that won't be optimized away.
16055 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
16057 case X86::BI__ud2:
16058 // llvm.trap emits a ud2a instruction on x86.
16059 return EmitTrapCall(Intrinsic::trap);
16060 case X86::BI__int2c: {
16061 // This syscall signals a driver assertion failure in x86 NT kernels.
16062 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
16063 llvm::InlineAsm *IA =
16064 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
16065 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
16066 getLLVMContext(), llvm::AttributeList::FunctionIndex,
16067 llvm::Attribute::NoReturn);
16068 llvm::CallInst *CI = Builder.CreateCall(IA);
16069 CI->setAttributes(NoReturnAttr);
16070 return CI;
16072 case X86::BI__readfsbyte:
16073 case X86::BI__readfsword:
16074 case X86::BI__readfsdword:
16075 case X86::BI__readfsqword: {
16076 llvm::Type *IntTy = ConvertType(E->getType());
16077 Value *Ptr = Builder.CreateIntToPtr(
16078 Ops[0], llvm::PointerType::get(getLLVMContext(), 257));
16079 LoadInst *Load = Builder.CreateAlignedLoad(
16080 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
16081 Load->setVolatile(true);
16082 return Load;
16084 case X86::BI__readgsbyte:
16085 case X86::BI__readgsword:
16086 case X86::BI__readgsdword:
16087 case X86::BI__readgsqword: {
16088 llvm::Type *IntTy = ConvertType(E->getType());
16089 Value *Ptr = Builder.CreateIntToPtr(
16090 Ops[0], llvm::PointerType::get(getLLVMContext(), 256));
16091 LoadInst *Load = Builder.CreateAlignedLoad(
16092 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
16093 Load->setVolatile(true);
16094 return Load;
16096 case X86::BI__builtin_ia32_encodekey128_u32: {
16097 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
16099 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
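// Results 1-3 of the intrinsic are the three 16-byte pieces of the key handle;
// store them byte-aligned to the buffer pointed to by Ops[2].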
16101 for (int i = 0; i < 3; ++i) {
16102 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
16103 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
16104 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
16107 return Builder.CreateExtractValue(Call, 0);
16109 case X86::BI__builtin_ia32_encodekey256_u32: {
16110 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
16112 Value *Call =
16113 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
16115 for (int i = 0; i < 4; ++i) {
16116 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
16117 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
16118 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
16121 return Builder.CreateExtractValue(Call, 0);
16123 case X86::BI__builtin_ia32_aesenc128kl_u8:
16124 case X86::BI__builtin_ia32_aesdec128kl_u8:
16125 case X86::BI__builtin_ia32_aesenc256kl_u8:
16126 case X86::BI__builtin_ia32_aesdec256kl_u8: {
16127 Intrinsic::ID IID;
16128 StringRef BlockName;
16129 switch (BuiltinID) {
16130 default:
16131 llvm_unreachable("Unexpected builtin");
16132 case X86::BI__builtin_ia32_aesenc128kl_u8:
16133 IID = Intrinsic::x86_aesenc128kl;
16134 BlockName = "aesenc128kl";
16135 break;
16136 case X86::BI__builtin_ia32_aesdec128kl_u8:
16137 IID = Intrinsic::x86_aesdec128kl;
16138 BlockName = "aesdec128kl";
16139 break;
16140 case X86::BI__builtin_ia32_aesenc256kl_u8:
16141 IID = Intrinsic::x86_aesenc256kl;
16142 BlockName = "aesenc256kl";
16143 break;
16144 case X86::BI__builtin_ia32_aesdec256kl_u8:
16145 IID = Intrinsic::x86_aesdec256kl;
16146 BlockName = "aesdec256kl";
16147 break;
16150 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
16152 BasicBlock *NoError =
16153 createBasicBlock(BlockName + "_no_error", this->CurFn);
16154 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
16155 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
16157 Value *Ret = Builder.CreateExtractValue(Call, 0);
16158 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
16159 Value *Out = Builder.CreateExtractValue(Call, 1);
16160 Builder.CreateCondBr(Succ, NoError, Error);
16162 Builder.SetInsertPoint(NoError);
16163 Builder.CreateDefaultAlignedStore(Out, Ops[0]);
16164 Builder.CreateBr(End);
16166 Builder.SetInsertPoint(Error);
16167 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
16168 Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
16169 Builder.CreateBr(End);
16171 Builder.SetInsertPoint(End);
16172 return Builder.CreateExtractValue(Call, 0);
16174 case X86::BI__builtin_ia32_aesencwide128kl_u8:
16175 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
16176 case X86::BI__builtin_ia32_aesencwide256kl_u8:
16177 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
16178 Intrinsic::ID IID;
16179 StringRef BlockName;
16180 switch (BuiltinID) {
16181 case X86::BI__builtin_ia32_aesencwide128kl_u8:
16182 IID = Intrinsic::x86_aesencwide128kl;
16183 BlockName = "aesencwide128kl";
16184 break;
16185 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
16186 IID = Intrinsic::x86_aesdecwide128kl;
16187 BlockName = "aesdecwide128kl";
16188 break;
16189 case X86::BI__builtin_ia32_aesencwide256kl_u8:
16190 IID = Intrinsic::x86_aesencwide256kl;
16191 BlockName = "aesencwide256kl";
16192 break;
16193 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
16194 IID = Intrinsic::x86_aesdecwide256kl;
16195 BlockName = "aesdecwide256kl";
16196 break;
16199 llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
16200 Value *InOps[9];
16201 InOps[0] = Ops[2];
16202 for (int i = 0; i != 8; ++i) {
16203 Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
16204 InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
16207 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
16209 BasicBlock *NoError =
16210 createBasicBlock(BlockName + "_no_error", this->CurFn);
16211 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
16212 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
16214 Value *Ret = Builder.CreateExtractValue(Call, 0);
16215 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
16216 Builder.CreateCondBr(Succ, NoError, Error);
16218 Builder.SetInsertPoint(NoError);
16219 for (int i = 0; i != 8; ++i) {
16220 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
16221 Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
16222 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
16224 Builder.CreateBr(End);
16226 Builder.SetInsertPoint(Error);
16227 for (int i = 0; i != 8; ++i) {
16228 Value *Out = Builder.CreateExtractValue(Call, i + 1);
16229 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
16230 Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
16231 Builder.CreateAlignedStore(Zero, Ptr, Align(16));
16233 Builder.CreateBr(End);
16235 Builder.SetInsertPoint(End);
16236 return Builder.CreateExtractValue(Call, 0);
16238 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
16239 IsConjFMA = true;
16240 [[fallthrough]];
16241 case X86::BI__builtin_ia32_vfmaddcph512_mask: {
16242 Intrinsic::ID IID = IsConjFMA
16243 ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
16244 : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
16245 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16246 return EmitX86Select(*this, Ops[3], Call, Ops[0]);
16248 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
16249 IsConjFMA = true;
16250 [[fallthrough]];
16251 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
16252 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
16253 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
16254 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16255 Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
16256 return EmitX86Select(*this, And, Call, Ops[0]);
16258 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
16259 IsConjFMA = true;
16260 [[fallthrough]];
16261 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
16262 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
16263 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
16264 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
16265 static constexpr int Mask[] = {0, 5, 6, 7};
16266 return Builder.CreateShuffleVector(Call, Ops[2], Mask);
16268 case X86::BI__builtin_ia32_prefetchi:
16269 return Builder.CreateCall(
16270 CGM.getIntrinsic(Intrinsic::prefetch, Ops[0]->getType()),
16271 {Ops[0], llvm::ConstantInt::get(Int32Ty, 0), Ops[1],
16272 llvm::ConstantInt::get(Int32Ty, 0)});
16276 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
16277 const CallExpr *E) {
16278 // Do not emit the builtin arguments inside the argument list of a function call,
16279 // because the evaluation order of function arguments is not specified in C++.
16280 // This is important when testing to ensure the arguments are emitted in the
16281 // same order every time. Eg:
16282 // Instead of:
16283 // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)),
16284 // EmitScalarExpr(E->getArg(1)), "swdiv");
16285 // Use:
16286 // Value *Op0 = EmitScalarExpr(E->getArg(0));
16287 // Value *Op1 = EmitScalarExpr(E->getArg(1));
16288 // return Builder.CreateFDiv(Op0, Op1, "swdiv");
16290 Intrinsic::ID ID = Intrinsic::not_intrinsic;
16292 switch (BuiltinID) {
16293 default: return nullptr;
16295 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
16296 // call __builtin_readcyclecounter.
16297 case PPC::BI__builtin_ppc_get_timebase:
16298 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
16300 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
16301 case PPC::BI__builtin_altivec_lvx:
16302 case PPC::BI__builtin_altivec_lvxl:
16303 case PPC::BI__builtin_altivec_lvebx:
16304 case PPC::BI__builtin_altivec_lvehx:
16305 case PPC::BI__builtin_altivec_lvewx:
16306 case PPC::BI__builtin_altivec_lvsl:
16307 case PPC::BI__builtin_altivec_lvsr:
16308 case PPC::BI__builtin_vsx_lxvd2x:
16309 case PPC::BI__builtin_vsx_lxvw4x:
16310 case PPC::BI__builtin_vsx_lxvd2x_be:
16311 case PPC::BI__builtin_vsx_lxvw4x_be:
16312 case PPC::BI__builtin_vsx_lxvl:
16313 case PPC::BI__builtin_vsx_lxvll:
16315 SmallVector<Value *, 2> Ops;
16316 Ops.push_back(EmitScalarExpr(E->getArg(0)));
16317 Ops.push_back(EmitScalarExpr(E->getArg(1)));
16318 if (!(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
16319 BuiltinID == PPC::BI__builtin_vsx_lxvll)) {
16320 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
16321 Ops.pop_back();
16324 switch (BuiltinID) {
16325 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
16326 case PPC::BI__builtin_altivec_lvx:
16327 ID = Intrinsic::ppc_altivec_lvx;
16328 break;
16329 case PPC::BI__builtin_altivec_lvxl:
16330 ID = Intrinsic::ppc_altivec_lvxl;
16331 break;
16332 case PPC::BI__builtin_altivec_lvebx:
16333 ID = Intrinsic::ppc_altivec_lvebx;
16334 break;
16335 case PPC::BI__builtin_altivec_lvehx:
16336 ID = Intrinsic::ppc_altivec_lvehx;
16337 break;
16338 case PPC::BI__builtin_altivec_lvewx:
16339 ID = Intrinsic::ppc_altivec_lvewx;
16340 break;
16341 case PPC::BI__builtin_altivec_lvsl:
16342 ID = Intrinsic::ppc_altivec_lvsl;
16343 break;
16344 case PPC::BI__builtin_altivec_lvsr:
16345 ID = Intrinsic::ppc_altivec_lvsr;
16346 break;
16347 case PPC::BI__builtin_vsx_lxvd2x:
16348 ID = Intrinsic::ppc_vsx_lxvd2x;
16349 break;
16350 case PPC::BI__builtin_vsx_lxvw4x:
16351 ID = Intrinsic::ppc_vsx_lxvw4x;
16352 break;
16353 case PPC::BI__builtin_vsx_lxvd2x_be:
16354 ID = Intrinsic::ppc_vsx_lxvd2x_be;
16355 break;
16356 case PPC::BI__builtin_vsx_lxvw4x_be:
16357 ID = Intrinsic::ppc_vsx_lxvw4x_be;
16358 break;
16359 case PPC::BI__builtin_vsx_lxvl:
16360 ID = Intrinsic::ppc_vsx_lxvl;
16361 break;
16362 case PPC::BI__builtin_vsx_lxvll:
16363 ID = Intrinsic::ppc_vsx_lxvll;
16364 break;
16366 llvm::Function *F = CGM.getIntrinsic(ID);
16367 return Builder.CreateCall(F, Ops, "");
16370 // vec_st, vec_xst_be
16371 case PPC::BI__builtin_altivec_stvx:
16372 case PPC::BI__builtin_altivec_stvxl:
16373 case PPC::BI__builtin_altivec_stvebx:
16374 case PPC::BI__builtin_altivec_stvehx:
16375 case PPC::BI__builtin_altivec_stvewx:
16376 case PPC::BI__builtin_vsx_stxvd2x:
16377 case PPC::BI__builtin_vsx_stxvw4x:
16378 case PPC::BI__builtin_vsx_stxvd2x_be:
16379 case PPC::BI__builtin_vsx_stxvw4x_be:
16380 case PPC::BI__builtin_vsx_stxvl:
16381 case PPC::BI__builtin_vsx_stxvll:
16383 SmallVector<Value *, 3> Ops;
16384 Ops.push_back(EmitScalarExpr(E->getArg(0)));
16385 Ops.push_back(EmitScalarExpr(E->getArg(1)));
16386 Ops.push_back(EmitScalarExpr(E->getArg(2)));
16387 if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
16388 BuiltinID == PPC::BI__builtin_vsx_stxvll)) {
16389 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
16390 Ops.pop_back();
16393 switch (BuiltinID) {
16394 default: llvm_unreachable("Unsupported st intrinsic!");
16395 case PPC::BI__builtin_altivec_stvx:
16396 ID = Intrinsic::ppc_altivec_stvx;
16397 break;
16398 case PPC::BI__builtin_altivec_stvxl:
16399 ID = Intrinsic::ppc_altivec_stvxl;
16400 break;
16401 case PPC::BI__builtin_altivec_stvebx:
16402 ID = Intrinsic::ppc_altivec_stvebx;
16403 break;
16404 case PPC::BI__builtin_altivec_stvehx:
16405 ID = Intrinsic::ppc_altivec_stvehx;
16406 break;
16407 case PPC::BI__builtin_altivec_stvewx:
16408 ID = Intrinsic::ppc_altivec_stvewx;
16409 break;
16410 case PPC::BI__builtin_vsx_stxvd2x:
16411 ID = Intrinsic::ppc_vsx_stxvd2x;
16412 break;
16413 case PPC::BI__builtin_vsx_stxvw4x:
16414 ID = Intrinsic::ppc_vsx_stxvw4x;
16415 break;
16416 case PPC::BI__builtin_vsx_stxvd2x_be:
16417 ID = Intrinsic::ppc_vsx_stxvd2x_be;
16418 break;
16419 case PPC::BI__builtin_vsx_stxvw4x_be:
16420 ID = Intrinsic::ppc_vsx_stxvw4x_be;
16421 break;
16422 case PPC::BI__builtin_vsx_stxvl:
16423 ID = Intrinsic::ppc_vsx_stxvl;
16424 break;
16425 case PPC::BI__builtin_vsx_stxvll:
16426 ID = Intrinsic::ppc_vsx_stxvll;
16427 break;
16429 llvm::Function *F = CGM.getIntrinsic(ID);
16430 return Builder.CreateCall(F, Ops, "");
16432 case PPC::BI__builtin_vsx_ldrmb: {
16433 // This essentially boils down to performing an unaligned VMX load sequence
16434 // that avoids crossing a page boundary, and then shuffling the elements
16435 // into the right side of the vector register.
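    // Sketch of the sequence emitted below (an assumed reading of this code,
    // not a verbatim trace): lvx loads of the 16-byte blocks containing the
    // first and last requested bytes, an lvsl/lvsr permute-control load, and
    // a vperm that splices the two loads into one vector before the unwanted
    // bytes are shuffled/masked away.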
16436 Value *Op0 = EmitScalarExpr(E->getArg(0));
16437 Value *Op1 = EmitScalarExpr(E->getArg(1));
16438 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
16439 llvm::Type *ResTy = ConvertType(E->getType());
16440 bool IsLE = getTarget().isLittleEndian();
16442 // If the user wants the entire vector, just load the entire vector.
16443 if (NumBytes == 16) {
16444 Value *LD =
16445 Builder.CreateLoad(Address(Op0, ResTy, CharUnits::fromQuantity(1)));
16446 if (!IsLE)
16447 return LD;
16449 // Reverse the bytes on LE.
16450 SmallVector<int, 16> RevMask;
16451 for (int Idx = 0; Idx < 16; Idx++)
16452 RevMask.push_back(15 - Idx);
16453 return Builder.CreateShuffleVector(LD, LD, RevMask);
16456 llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
16457 llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
16458 : Intrinsic::ppc_altivec_lvsl);
16459 llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
16460 Value *HiMem = Builder.CreateGEP(
16461 Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1));
16462 Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo");
16463 Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
16464 Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1");
16466 Op0 = IsLE ? HiLd : LoLd;
16467 Op1 = IsLE ? LoLd : HiLd;
16468 Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
16469 Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
16471 if (IsLE) {
16472 SmallVector<int, 16> Consts;
16473 for (int Idx = 0; Idx < 16; Idx++) {
16474 int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
16475 : 16 - (NumBytes - Idx);
16476 Consts.push_back(Val);
16478 return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
16479 Zero, Consts);
16481 SmallVector<Constant *, 16> Consts;
16482 for (int Idx = 0; Idx < 16; Idx++)
16483 Consts.push_back(Builder.getInt8(NumBytes + Idx));
16484 Value *Mask2 = ConstantVector::get(Consts);
16485 return Builder.CreateBitCast(
16486 Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
16488 case PPC::BI__builtin_vsx_strmb: {
16489 Value *Op0 = EmitScalarExpr(E->getArg(0));
16490 Value *Op1 = EmitScalarExpr(E->getArg(1));
16491 Value *Op2 = EmitScalarExpr(E->getArg(2));
16492 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
16493 bool IsLE = getTarget().isLittleEndian();
16494 auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
16495 // When storing the whole vector, simply store it on BE; on LE, reverse
16496 // the bytes first and then store.
16497 if (Width == 16) {
16498 Value *StVec = Op2;
16499 if (IsLE) {
16500 SmallVector<int, 16> RevMask;
16501 for (int Idx = 0; Idx < 16; Idx++)
16502 RevMask.push_back(15 - Idx);
16503 StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
16505 return Builder.CreateStore(
16506 StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1)));
16508 auto *ConvTy = Int64Ty;
16509 unsigned NumElts = 0;
16510 switch (Width) {
16511 default:
16512 llvm_unreachable("width for stores must be a power of 2");
16513 case 8:
16514 ConvTy = Int64Ty;
16515 NumElts = 2;
16516 break;
16517 case 4:
16518 ConvTy = Int32Ty;
16519 NumElts = 4;
16520 break;
16521 case 2:
16522 ConvTy = Int16Ty;
16523 NumElts = 8;
16524 break;
16525 case 1:
16526 ConvTy = Int8Ty;
16527 NumElts = 16;
16528 break;
16530 Value *Vec = Builder.CreateBitCast(
16531 Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
16532 Value *Ptr =
16533 Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
16534 Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
16535 if (IsLE && Width > 1) {
16536 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
16537 Elt = Builder.CreateCall(F, Elt);
16539 return Builder.CreateStore(
16540 Elt, Address(Ptr, ConvTy, CharUnits::fromQuantity(1)));
16542 unsigned Stored = 0;
16543 unsigned RemainingBytes = NumBytes;
16544 Value *Result;
16545 if (NumBytes == 16)
16546 return StoreSubVec(16, 0, 0);
16547 if (NumBytes >= 8) {
16548 Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
16549 RemainingBytes -= 8;
16550 Stored += 8;
16552 if (RemainingBytes >= 4) {
16553 Result = StoreSubVec(4, NumBytes - Stored - 4,
16554 IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
16555 RemainingBytes -= 4;
16556 Stored += 4;
16558 if (RemainingBytes >= 2) {
16559 Result = StoreSubVec(2, NumBytes - Stored - 2,
16560 IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
16561 RemainingBytes -= 2;
16562 Stored += 2;
16564 if (RemainingBytes)
16565 Result =
16566 StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
16567 return Result;
16569 // Square root
16570 case PPC::BI__builtin_vsx_xvsqrtsp:
16571 case PPC::BI__builtin_vsx_xvsqrtdp: {
16572 llvm::Type *ResultType = ConvertType(E->getType());
16573 Value *X = EmitScalarExpr(E->getArg(0));
16574 if (Builder.getIsFPConstrained()) {
16575 llvm::Function *F = CGM.getIntrinsic(
16576 Intrinsic::experimental_constrained_sqrt, ResultType);
16577 return Builder.CreateConstrainedFPCall(F, X);
16578 } else {
16579 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16580 return Builder.CreateCall(F, X);
16583 // Count leading zeros
16584 case PPC::BI__builtin_altivec_vclzb:
16585 case PPC::BI__builtin_altivec_vclzh:
16586 case PPC::BI__builtin_altivec_vclzw:
16587 case PPC::BI__builtin_altivec_vclzd: {
16588 llvm::Type *ResultType = ConvertType(E->getType());
16589 Value *X = EmitScalarExpr(E->getArg(0));
16590 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16591 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
16592 return Builder.CreateCall(F, {X, Undef});
16594 case PPC::BI__builtin_altivec_vctzb:
16595 case PPC::BI__builtin_altivec_vctzh:
16596 case PPC::BI__builtin_altivec_vctzw:
16597 case PPC::BI__builtin_altivec_vctzd: {
16598 llvm::Type *ResultType = ConvertType(E->getType());
16599 Value *X = EmitScalarExpr(E->getArg(0));
16600 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16601 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
16602 return Builder.CreateCall(F, {X, Undef});
16604 case PPC::BI__builtin_altivec_vinsd:
16605 case PPC::BI__builtin_altivec_vinsw:
16606 case PPC::BI__builtin_altivec_vinsd_elt:
16607 case PPC::BI__builtin_altivec_vinsw_elt: {
16608 llvm::Type *ResultType = ConvertType(E->getType());
16609 Value *Op0 = EmitScalarExpr(E->getArg(0));
16610 Value *Op1 = EmitScalarExpr(E->getArg(1));
16611 Value *Op2 = EmitScalarExpr(E->getArg(2));
16613 bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
16614 BuiltinID == PPC::BI__builtin_altivec_vinsd);
16616 bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
16617 BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);
16619 // The third argument must be a compile time constant.
16620 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16621 assert(ArgCI &&
16622 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
16624 // The valid range for the third argument depends on the input type and the
16625 // builtin called.
16626 int ValidMaxValue = 0;
16627 if (IsUnaligned)
16628 ValidMaxValue = (Is32bit) ? 12 : 8;
16629 else
16630 ValidMaxValue = (Is32bit) ? 3 : 1;
16632 // Get value of third argument.
16633 int64_t ConstArg = ArgCI->getSExtValue();
16635 // Compose range checking error message.
16636 std::string RangeErrMsg = IsUnaligned ? "byte" : "element";
16637 RangeErrMsg += " number " + llvm::to_string(ConstArg);
16638 RangeErrMsg += " is outside of the valid range [0, ";
16639 RangeErrMsg += llvm::to_string(ValidMaxValue) + "]";
16641 // Issue error if third argument is not within the valid range.
16642 if (ConstArg < 0 || ConstArg > ValidMaxValue)
16643 CGM.Error(E->getExprLoc(), RangeErrMsg);
16645 // The input to vec_replace_elt is an element index; convert it to a byte index.
16646 if (!IsUnaligned) {
16647 ConstArg *= Is32bit ? 4 : 8;
16648 // Fix the constant according to endianness.
16649 if (getTarget().isLittleEndian())
16650 ConstArg = (Is32bit ? 12 : 8) - ConstArg;
16653 ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
16654 Op2 = ConstantInt::getSigned(Int32Ty, ConstArg);
16655 // Casting input to vector int as per intrinsic definition.
16656 Op0 =
16657 Is32bit
16658 ? Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4))
16659 : Builder.CreateBitCast(Op0,
16660 llvm::FixedVectorType::get(Int64Ty, 2));
16661 return Builder.CreateBitCast(
16662 Builder.CreateCall(CGM.getIntrinsic(ID), {Op0, Op1, Op2}), ResultType);
16664 case PPC::BI__builtin_altivec_vpopcntb:
16665 case PPC::BI__builtin_altivec_vpopcnth:
16666 case PPC::BI__builtin_altivec_vpopcntw:
16667 case PPC::BI__builtin_altivec_vpopcntd: {
16668 llvm::Type *ResultType = ConvertType(E->getType());
16669 Value *X = EmitScalarExpr(E->getArg(0));
16670 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
16671 return Builder.CreateCall(F, X);
16673 case PPC::BI__builtin_altivec_vadduqm:
16674 case PPC::BI__builtin_altivec_vsubuqm: {
16675 Value *Op0 = EmitScalarExpr(E->getArg(0));
16676 Value *Op1 = EmitScalarExpr(E->getArg(1));
16677 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
16678 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
16679 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
16680 if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
16681 return Builder.CreateAdd(Op0, Op1, "vadduqm");
16682 else
16683 return Builder.CreateSub(Op0, Op1, "vsubuqm");
16685 case PPC::BI__builtin_altivec_vaddcuq_c:
16686 case PPC::BI__builtin_altivec_vsubcuq_c: {
16687 SmallVector<Value *, 2> Ops;
16688 Value *Op0 = EmitScalarExpr(E->getArg(0));
16689 Value *Op1 = EmitScalarExpr(E->getArg(1));
16690 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
16691 llvm::IntegerType::get(getLLVMContext(), 128), 1);
16692 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
16693 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
16694 ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
16695 ? Intrinsic::ppc_altivec_vaddcuq
16696 : Intrinsic::ppc_altivec_vsubcuq;
16697 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
16699 case PPC::BI__builtin_altivec_vaddeuqm_c:
16700 case PPC::BI__builtin_altivec_vaddecuq_c:
16701 case PPC::BI__builtin_altivec_vsubeuqm_c:
16702 case PPC::BI__builtin_altivec_vsubecuq_c: {
16703 SmallVector<Value *, 3> Ops;
16704 Value *Op0 = EmitScalarExpr(E->getArg(0));
16705 Value *Op1 = EmitScalarExpr(E->getArg(1));
16706 Value *Op2 = EmitScalarExpr(E->getArg(2));
16707 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
16708 llvm::IntegerType::get(getLLVMContext(), 128), 1);
16709 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
16710 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
16711 Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
16712 switch (BuiltinID) {
16713 default:
16714 llvm_unreachable("Unsupported intrinsic!");
16715 case PPC::BI__builtin_altivec_vaddeuqm_c:
16716 ID = Intrinsic::ppc_altivec_vaddeuqm;
16717 break;
16718 case PPC::BI__builtin_altivec_vaddecuq_c:
16719 ID = Intrinsic::ppc_altivec_vaddecuq;
16720 break;
16721 case PPC::BI__builtin_altivec_vsubeuqm_c:
16722 ID = Intrinsic::ppc_altivec_vsubeuqm;
16723 break;
16724 case PPC::BI__builtin_altivec_vsubecuq_c:
16725 ID = Intrinsic::ppc_altivec_vsubecuq;
16726 break;
16728 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
16730 // Rotate and insert under mask operation.
16731 // __rldimi(rs, is, shift, mask)
16732 // (rotl64(rs, shift) & mask) | (is & ~mask)
16733 // __rlwimi(rs, is, shift, mask)
16734 // (rotl(rs, shift) & mask) | (is & ~mask)
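  // Hedged worked example for __rlwimi (illustrative constants only):
  //   __rlwimi(0x12345678, 0xAABBCCDD, 8, 0x00FFFF00)
  //     = (rotl32(0x12345678, 8) & 0x00FFFF00) | (0xAABBCCDD & 0xFF0000FF)
  //     = (0x34567812 & 0x00FFFF00) | 0xAA0000DD
  //     = 0x00567800 | 0xAA0000DD = 0xAA5678DD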
16735 case PPC::BI__builtin_ppc_rldimi:
16736 case PPC::BI__builtin_ppc_rlwimi: {
16737 Value *Op0 = EmitScalarExpr(E->getArg(0));
16738 Value *Op1 = EmitScalarExpr(E->getArg(1));
16739 Value *Op2 = EmitScalarExpr(E->getArg(2));
16740 Value *Op3 = EmitScalarExpr(E->getArg(3));
16741 llvm::Type *Ty = Op0->getType();
16742 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16743 if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
16744 Op2 = Builder.CreateZExt(Op2, Int64Ty);
16745 Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
16746 Value *X = Builder.CreateAnd(Shift, Op3);
16747 Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
16748 return Builder.CreateOr(X, Y);
16750 // Rotate and insert under mask operation.
16751 // __rlwnm(rs, shift, mask)
16752 // rotl(rs, shift) & mask
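  // Hedged worked example for __rlwnm (illustrative constants only):
  //   __rlwnm(0x12345678, 4, 0xFFFF0000) = rotl32(0x12345678, 4) & 0xFFFF0000
  //     = 0x23456781 & 0xFFFF0000 = 0x23450000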
16753 case PPC::BI__builtin_ppc_rlwnm: {
16754 Value *Op0 = EmitScalarExpr(E->getArg(0));
16755 Value *Op1 = EmitScalarExpr(E->getArg(1));
16756 Value *Op2 = EmitScalarExpr(E->getArg(2));
16757 llvm::Type *Ty = Op0->getType();
16758 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16759 Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
16760 return Builder.CreateAnd(Shift, Op2);
16762 case PPC::BI__builtin_ppc_poppar4:
16763 case PPC::BI__builtin_ppc_poppar8: {
16764 Value *Op0 = EmitScalarExpr(E->getArg(0));
16765 llvm::Type *ArgType = Op0->getType();
16766 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
16767 Value *Tmp = Builder.CreateCall(F, Op0);
16769 llvm::Type *ResultType = ConvertType(E->getType());
16770 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
16771 if (Result->getType() != ResultType)
16772 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
16773 "cast");
16774 return Result;
16776 case PPC::BI__builtin_ppc_cmpb: {
16777 Value *Op0 = EmitScalarExpr(E->getArg(0));
16778 Value *Op1 = EmitScalarExpr(E->getArg(1));
16779 if (getTarget().getTriple().isPPC64()) {
16780 Function *F =
16781 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
16782 return Builder.CreateCall(F, {Op0, Op1}, "cmpb");
16784 // For 32 bit, emit the code as below:
16785 // %conv = trunc i64 %a to i32
16786 // %conv1 = trunc i64 %b to i32
16787 // %shr = lshr i64 %a, 32
16788 // %conv2 = trunc i64 %shr to i32
16789 // %shr3 = lshr i64 %b, 32
16790 // %conv4 = trunc i64 %shr3 to i32
16791 // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
16792 // %conv5 = zext i32 %0 to i64
16793 // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
16794 // %conv614 = zext i32 %1 to i64
16795 // %shl = shl nuw i64 %conv614, 32
16796 // %or = or i64 %shl, %conv5
16797 // ret i64 %or
16798 Function *F =
16799 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
16800 Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty);
16801 Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty);
16802 Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
16803 Value *ArgOneHi =
16804 Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty);
16805 Value *ArgTwoHi =
16806 Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty);
16807 Value *ResLo = Builder.CreateZExt(
16808 Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
16809 Value *ResHiShift = Builder.CreateZExt(
16810 Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
16811 Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
16812 return Builder.CreateOr(ResLo, ResHi);
16814 // Copy sign
16815 case PPC::BI__builtin_vsx_xvcpsgnsp:
16816 case PPC::BI__builtin_vsx_xvcpsgndp: {
16817 llvm::Type *ResultType = ConvertType(E->getType());
16818 Value *X = EmitScalarExpr(E->getArg(0));
16819 Value *Y = EmitScalarExpr(E->getArg(1));
16820 ID = Intrinsic::copysign;
16821 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
16822 return Builder.CreateCall(F, {X, Y});
16824 // Rounding/truncation
16825 case PPC::BI__builtin_vsx_xvrspip:
16826 case PPC::BI__builtin_vsx_xvrdpip:
16827 case PPC::BI__builtin_vsx_xvrdpim:
16828 case PPC::BI__builtin_vsx_xvrspim:
16829 case PPC::BI__builtin_vsx_xvrdpi:
16830 case PPC::BI__builtin_vsx_xvrspi:
16831 case PPC::BI__builtin_vsx_xvrdpic:
16832 case PPC::BI__builtin_vsx_xvrspic:
16833 case PPC::BI__builtin_vsx_xvrdpiz:
16834 case PPC::BI__builtin_vsx_xvrspiz: {
16835 llvm::Type *ResultType = ConvertType(E->getType());
16836 Value *X = EmitScalarExpr(E->getArg(0));
16837 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
16838 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
16839 ID = Builder.getIsFPConstrained()
16840 ? Intrinsic::experimental_constrained_floor
16841 : Intrinsic::floor;
16842 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
16843 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
16844 ID = Builder.getIsFPConstrained()
16845 ? Intrinsic::experimental_constrained_round
16846 : Intrinsic::round;
16847 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
16848 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
16849 ID = Builder.getIsFPConstrained()
16850 ? Intrinsic::experimental_constrained_rint
16851 : Intrinsic::rint;
16852 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
16853 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
16854 ID = Builder.getIsFPConstrained()
16855 ? Intrinsic::experimental_constrained_ceil
16856 : Intrinsic::ceil;
16857 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
16858 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
16859 ID = Builder.getIsFPConstrained()
16860 ? Intrinsic::experimental_constrained_trunc
16861 : Intrinsic::trunc;
16862 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
16863 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
16864 : Builder.CreateCall(F, X);
16867 // Absolute value
16868 case PPC::BI__builtin_vsx_xvabsdp:
16869 case PPC::BI__builtin_vsx_xvabssp: {
16870 llvm::Type *ResultType = ConvertType(E->getType());
16871 Value *X = EmitScalarExpr(E->getArg(0));
16872 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16873 return Builder.CreateCall(F, X);
16876 // Fastmath by default
16877 case PPC::BI__builtin_ppc_recipdivf:
16878 case PPC::BI__builtin_ppc_recipdivd:
16879 case PPC::BI__builtin_ppc_rsqrtf:
16880 case PPC::BI__builtin_ppc_rsqrtd: {
16881 FastMathFlags FMF = Builder.getFastMathFlags();
16882 Builder.getFastMathFlags().setFast();
16883 llvm::Type *ResultType = ConvertType(E->getType());
16884 Value *X = EmitScalarExpr(E->getArg(0));
16886 if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
16887 BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
16888 Value *Y = EmitScalarExpr(E->getArg(1));
16889 Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
16890 Builder.getFastMathFlags() &= (FMF);
16891 return FDiv;
16893 auto *One = ConstantFP::get(ResultType, 1.0);
16894 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16895 Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
16896 Builder.getFastMathFlags() &= (FMF);
16897 return FDiv;
16899 case PPC::BI__builtin_ppc_alignx: {
16900 Value *Op0 = EmitScalarExpr(E->getArg(0));
16901 Value *Op1 = EmitScalarExpr(E->getArg(1));
16902 ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
16903 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
16904 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
16905 llvm::Value::MaximumAlignment);
16907 emitAlignmentAssumption(Op1, E->getArg(1),
16908 /*The expr loc is sufficient.*/ SourceLocation(),
16909 AlignmentCI, nullptr);
16910 return Op1;
16912 case PPC::BI__builtin_ppc_rdlam: {
16913 Value *Op0 = EmitScalarExpr(E->getArg(0));
16914 Value *Op1 = EmitScalarExpr(E->getArg(1));
16915 Value *Op2 = EmitScalarExpr(E->getArg(2));
16916 llvm::Type *Ty = Op0->getType();
16917 Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false);
16918 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16919 Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
16920 return Builder.CreateAnd(Rotate, Op2);
16922 case PPC::BI__builtin_ppc_load2r: {
16923 Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
16924 Value *Op0 = EmitScalarExpr(E->getArg(0));
16925 Value *LoadIntrinsic = Builder.CreateCall(F, {Op0});
16926 return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
16928 // FMA variations
16929 case PPC::BI__builtin_ppc_fnmsub:
16930 case PPC::BI__builtin_ppc_fnmsubs:
16931 case PPC::BI__builtin_vsx_xvmaddadp:
16932 case PPC::BI__builtin_vsx_xvmaddasp:
16933 case PPC::BI__builtin_vsx_xvnmaddadp:
16934 case PPC::BI__builtin_vsx_xvnmaddasp:
16935 case PPC::BI__builtin_vsx_xvmsubadp:
16936 case PPC::BI__builtin_vsx_xvmsubasp:
16937 case PPC::BI__builtin_vsx_xvnmsubadp:
16938 case PPC::BI__builtin_vsx_xvnmsubasp: {
16939 llvm::Type *ResultType = ConvertType(E->getType());
16940 Value *X = EmitScalarExpr(E->getArg(0));
16941 Value *Y = EmitScalarExpr(E->getArg(1));
16942 Value *Z = EmitScalarExpr(E->getArg(2));
16943 llvm::Function *F;
16944 if (Builder.getIsFPConstrained())
16945 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16946 else
16947 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16948 switch (BuiltinID) {
16949 case PPC::BI__builtin_vsx_xvmaddadp:
16950 case PPC::BI__builtin_vsx_xvmaddasp:
16951 if (Builder.getIsFPConstrained())
16952 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
16953 else
16954 return Builder.CreateCall(F, {X, Y, Z});
16955 case PPC::BI__builtin_vsx_xvnmaddadp:
16956 case PPC::BI__builtin_vsx_xvnmaddasp:
16957 if (Builder.getIsFPConstrained())
16958 return Builder.CreateFNeg(
16959 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
16960 else
16961 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
16962 case PPC::BI__builtin_vsx_xvmsubadp:
16963 case PPC::BI__builtin_vsx_xvmsubasp:
16964 if (Builder.getIsFPConstrained())
16965 return Builder.CreateConstrainedFPCall(
16966 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16967 else
16968 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16969 case PPC::BI__builtin_ppc_fnmsub:
16970 case PPC::BI__builtin_ppc_fnmsubs:
16971 case PPC::BI__builtin_vsx_xvnmsubadp:
16972 case PPC::BI__builtin_vsx_xvnmsubasp:
16973 if (Builder.getIsFPConstrained())
16974 return Builder.CreateFNeg(
16975 Builder.CreateConstrainedFPCall(
16976 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
16977 "neg");
16978 else
16979 return Builder.CreateCall(
16980 CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
16982 llvm_unreachable("Unknown FMA operation");
16983 return nullptr; // Suppress no-return warning
16986 case PPC::BI__builtin_vsx_insertword: {
16987 Value *Op0 = EmitScalarExpr(E->getArg(0));
16988 Value *Op1 = EmitScalarExpr(E->getArg(1));
16989 Value *Op2 = EmitScalarExpr(E->getArg(2));
16990 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
16992 // The third argument is a compile-time constant int. It must be clamped
16993 // to the range [0, 12].
16994 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16995 assert(ArgCI &&
16996 "Third arg to xxinsertw intrinsic must be constant integer");
16997 const int64_t MaxIndex = 12;
16998 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
17000 // The builtin semantics don't exactly match the xxinsertw instruction's
17001 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
17002 // word from the first argument and inserts it into the second argument. The
17003 // instruction extracts the word from its second input register and inserts
17004 // it into its first input register, so swap the first and second arguments.
17005 std::swap(Op0, Op1);
17007 // Need to cast the second argument from a vector of unsigned int to a
17008 // vector of long long.
17009 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
17011 if (getTarget().isLittleEndian()) {
17012 // Reverse the double words in the vector we will extract from.
17013 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
17014 Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});
17016 // Reverse the index.
17017 Index = MaxIndex - Index;
17020 // Intrinsic expects the first arg to be a vector of int.
17021 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
17022 Op2 = ConstantInt::getSigned(Int32Ty, Index);
17023 return Builder.CreateCall(F, {Op0, Op1, Op2});
17026 case PPC::BI__builtin_vsx_extractuword: {
17027 Value *Op0 = EmitScalarExpr(E->getArg(0));
17028 Value *Op1 = EmitScalarExpr(E->getArg(1));
17029 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
17031 // Intrinsic expects the first argument to be a vector of doublewords.
17032 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
17034 // The second argument is a compile time constant int that needs to
17035 // be clamped to the range [0, 12].
17036 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
17037 assert(ArgCI &&
17038 "Second Arg to xxextractuw intrinsic must be a constant integer!");
17039 const int64_t MaxIndex = 12;
17040 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
17042 if (getTarget().isLittleEndian()) {
17043 // Reverse the index.
17044 Index = MaxIndex - Index;
17045 Op1 = ConstantInt::getSigned(Int32Ty, Index);
17047 // Emit the call, then reverse the double words of the results vector.
17048 Value *Call = Builder.CreateCall(F, {Op0, Op1});
17050 Value *ShuffleCall =
17051 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
17052 return ShuffleCall;
17053 } else {
17054 Op1 = ConstantInt::getSigned(Int32Ty, Index);
17055 return Builder.CreateCall(F, {Op0, Op1});
17059 case PPC::BI__builtin_vsx_xxpermdi: {
17060 Value *Op0 = EmitScalarExpr(E->getArg(0));
17061 Value *Op1 = EmitScalarExpr(E->getArg(1));
17062 Value *Op2 = EmitScalarExpr(E->getArg(2));
17063 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
17064 assert(ArgCI && "Third arg must be constant integer!");
17066 unsigned Index = ArgCI->getZExtValue();
17067 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
17068 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
17070 // Account for endianness by treating this as just a shuffle. So we use the
17071 // same indices for both LE and BE in order to produce expected results in
17072 // both cases.
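    // Illustrative example of the index math below (assumed, not quoted from
    // the source): Index = 2 (binary 10) gives ElemIdx0 = 1 and ElemIdx1 = 2,
    // i.e. the shuffle takes doubleword 1 of Op0 and doubleword 0 of Op1.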
17073 int ElemIdx0 = (Index & 2) >> 1;
17074 int ElemIdx1 = 2 + (Index & 1);
17076 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
17077 Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
17078 QualType BIRetType = E->getType();
17079 auto RetTy = ConvertType(BIRetType);
17080 return Builder.CreateBitCast(ShuffleCall, RetTy);
17083 case PPC::BI__builtin_vsx_xxsldwi: {
17084 Value *Op0 = EmitScalarExpr(E->getArg(0));
17085 Value *Op1 = EmitScalarExpr(E->getArg(1));
17086 Value *Op2 = EmitScalarExpr(E->getArg(2));
17087 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
17088 assert(ArgCI && "Third argument must be a compile time constant");
17089 unsigned Index = ArgCI->getZExtValue() & 0x3;
17090 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
17091 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4));
17093 // Create a shuffle mask
17094 int ElemIdx0;
17095 int ElemIdx1;
17096 int ElemIdx2;
17097 int ElemIdx3;
17098 if (getTarget().isLittleEndian()) {
17099 // Little endian element N comes from element 8+N-Index of the
17100 // concatenated wide vector (of course, using modulo arithmetic on
17101 // the total number of elements).
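      // For instance (illustrative only), Index = 1 yields ElemIdx{0..3} =
      // {7, 0, 1, 2} here, versus {1, 2, 3, 4} on the big-endian path below.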
17102 ElemIdx0 = (8 - Index) % 8;
17103 ElemIdx1 = (9 - Index) % 8;
17104 ElemIdx2 = (10 - Index) % 8;
17105 ElemIdx3 = (11 - Index) % 8;
17106 } else {
17107 // Big endian ElemIdx<N> = Index + N
17108 ElemIdx0 = Index;
17109 ElemIdx1 = Index + 1;
17110 ElemIdx2 = Index + 2;
17111 ElemIdx3 = Index + 3;
17114 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
17115 Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
17116 QualType BIRetType = E->getType();
17117 auto RetTy = ConvertType(BIRetType);
17118 return Builder.CreateBitCast(ShuffleCall, RetTy);
17121 case PPC::BI__builtin_pack_vector_int128: {
17122 Value *Op0 = EmitScalarExpr(E->getArg(0));
17123 Value *Op1 = EmitScalarExpr(E->getArg(1));
17124 bool isLittleEndian = getTarget().isLittleEndian();
17125 Value *PoisonValue =
17126 llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->getType(), 2));
17127 Value *Res = Builder.CreateInsertElement(
17128 PoisonValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
17129 Res = Builder.CreateInsertElement(Res, Op1,
17130 (uint64_t)(isLittleEndian ? 0 : 1));
17131 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
17134 case PPC::BI__builtin_unpack_vector_int128: {
17135 Value *Op0 = EmitScalarExpr(E->getArg(0));
17136 Value *Op1 = EmitScalarExpr(E->getArg(1));
17137 ConstantInt *Index = cast<ConstantInt>(Op1);
17138 Value *Unpacked = Builder.CreateBitCast(
17139 Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
17141 if (getTarget().isLittleEndian())
17142 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
17144 return Builder.CreateExtractElement(Unpacked, Index);
17147 case PPC::BI__builtin_ppc_sthcx: {
17148 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
17149 Value *Op0 = EmitScalarExpr(E->getArg(0));
17150 Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty);
17151 return Builder.CreateCall(F, {Op0, Op1});
17154 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
17155 // Some of the MMA instructions accumulate their result into an existing
17156 // accumulator whereas the others generate a new accumulator. So we need
17157 // custom code generation that expands such a builtin call into a load through
17158 // the pointer (if the corresponding instruction accumulates its result),
17159 // followed by the call to the intrinsic and a store of the result.
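// Sketch of the expansion for an accumulating MMA builtin (illustrative IR
// only; the intrinsic named here is one assumed example of the family
// handled below):
//   %acc = load <512 x i1>, ptr %dst
//   %res = call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %acc,
//                                                   <16 x i8> %a, <16 x i8> %b)
//   store <512 x i1> %res, ptr %dst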
17160 #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
17161 case PPC::BI__builtin_##Name:
17162 #include "clang/Basic/BuiltinsPPC.def"
17164 SmallVector<Value *, 4> Ops;
17165 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
17166 if (E->getArg(i)->getType()->isArrayType())
17167 Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
17168 else
17169 Ops.push_back(EmitScalarExpr(E->getArg(i)));
17170 // The first argument of these builtins is a pointer used to store their
17171 // result. However, the LLVM intrinsics return their results as multiple
17172 // return values. So, here we emit code that extracts these values from the
17173 // intrinsic result and stores them through that pointer.
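  // For example (assumed from the loop below): __builtin_mma_disassemble_acc
  // extracts four 16-byte vectors from the intrinsic's aggregate result and
  // stores them at byte offsets 0, 16, 32 and 48 from the destination pointer.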
17174 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
17175 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
17176 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
17177 unsigned NumVecs = 2;
17178 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
17179 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
17180 NumVecs = 4;
17181 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
17183 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
17184 Address Addr = EmitPointerWithAlignment(E->getArg(1));
17185 Value *Vec = Builder.CreateLoad(Addr);
17186 Value *Call = Builder.CreateCall(F, {Vec});
17187 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
17188 Value *Ptr = Ops[0];
17189 for (unsigned i=0; i<NumVecs; i++) {
17190 Value *Vec = Builder.CreateExtractValue(Call, i);
17191 llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
17192 Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
17193 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
17195 return Call;
17197 if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
17198 BuiltinID == PPC::BI__builtin_mma_build_acc) {
17199 // Reverse the order of the operands for LE, so the
17200 // same builtin call can be used on both LE and BE
17201 // without the need for the programmer to swap operands.
17202 // The operands are reversed starting from the second argument;
17203 // the first operand is the pointer to the pair/accumulator
17204 // that is being built.
17205 if (getTarget().isLittleEndian())
17206 std::reverse(Ops.begin() + 1, Ops.end());
17208 bool Accumulate;
17209 switch (BuiltinID) {
17210 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
17211 case PPC::BI__builtin_##Name: \
17212 ID = Intrinsic::ppc_##Intr; \
17213 Accumulate = Acc; \
17214 break;
17215 #include "clang/Basic/BuiltinsPPC.def"
17217 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
17218 BuiltinID == PPC::BI__builtin_vsx_stxvp ||
17219 BuiltinID == PPC::BI__builtin_mma_lxvp ||
17220 BuiltinID == PPC::BI__builtin_mma_stxvp) {
17221 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
17222 BuiltinID == PPC::BI__builtin_mma_lxvp) {
17223 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
17224 } else {
17225 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
17227 Ops.pop_back();
17228 llvm::Function *F = CGM.getIntrinsic(ID);
17229 return Builder.CreateCall(F, Ops, "");
17231 SmallVector<Value*, 4> CallOps;
17232 if (Accumulate) {
17233 Address Addr = EmitPointerWithAlignment(E->getArg(0));
17234 Value *Acc = Builder.CreateLoad(Addr);
17235 CallOps.push_back(Acc);
17237 for (unsigned i=1; i<Ops.size(); i++)
17238 CallOps.push_back(Ops[i]);
17239 llvm::Function *F = CGM.getIntrinsic(ID);
17240 Value *Call = Builder.CreateCall(F, CallOps);
17241 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
17244 case PPC::BI__builtin_ppc_compare_and_swap:
17245 case PPC::BI__builtin_ppc_compare_and_swaplp: {
17246 Address Addr = EmitPointerWithAlignment(E->getArg(0));
17247 Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
17248 Value *OldVal = Builder.CreateLoad(OldValAddr);
17249 QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
17250 LValue LV = MakeAddrLValue(Addr, AtomicTy);
17251 Value *Op2 = EmitScalarExpr(E->getArg(2));
17252 auto Pair = EmitAtomicCompareExchange(
17253 LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(),
17254 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
17255 // Unlike C11's atomic_compare_exchange, according to
17256 // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
17257 // > In either case, the contents of the memory location specified by addr
17258 // > are copied into the memory location specified by old_val_addr.
17259 // However, it does not specify whether the store to OldValAddr is atomic
17260 // or which ordering to use. Following XL's codegen, treat it as a normal
17261 // store.
17262 Value *LoadedVal = Pair.first.getScalarVal();
17263 Builder.CreateStore(LoadedVal, OldValAddr);
17264 return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
17266 case PPC::BI__builtin_ppc_fetch_and_add:
17267 case PPC::BI__builtin_ppc_fetch_and_addlp: {
17268 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
17269 llvm::AtomicOrdering::Monotonic);
17271 case PPC::BI__builtin_ppc_fetch_and_and:
17272 case PPC::BI__builtin_ppc_fetch_and_andlp: {
17273 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
17274 llvm::AtomicOrdering::Monotonic);
17277 case PPC::BI__builtin_ppc_fetch_and_or:
17278 case PPC::BI__builtin_ppc_fetch_and_orlp: {
17279 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
17280 llvm::AtomicOrdering::Monotonic);
17282 case PPC::BI__builtin_ppc_fetch_and_swap:
17283 case PPC::BI__builtin_ppc_fetch_and_swaplp: {
17284 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
17285 llvm::AtomicOrdering::Monotonic);
17287 case PPC::BI__builtin_ppc_ldarx:
17288 case PPC::BI__builtin_ppc_lwarx:
17289 case PPC::BI__builtin_ppc_lharx:
17290 case PPC::BI__builtin_ppc_lbarx:
17291 return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
17292 case PPC::BI__builtin_ppc_mfspr: {
17293 Value *Op0 = EmitScalarExpr(E->getArg(0));
17294 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
17295 ? Int32Ty
17296 : Int64Ty;
17297 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
17298 return Builder.CreateCall(F, {Op0});
17300 case PPC::BI__builtin_ppc_mtspr: {
17301 Value *Op0 = EmitScalarExpr(E->getArg(0));
17302 Value *Op1 = EmitScalarExpr(E->getArg(1));
17303 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
17304 ? Int32Ty
17305 : Int64Ty;
17306 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
17307 return Builder.CreateCall(F, {Op0, Op1});
17309 case PPC::BI__builtin_ppc_popcntb: {
17310 Value *ArgValue = EmitScalarExpr(E->getArg(0));
17311 llvm::Type *ArgType = ArgValue->getType();
17312 Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
17313 return Builder.CreateCall(F, {ArgValue}, "popcntb");
17315 case PPC::BI__builtin_ppc_mtfsf: {
17316 // The builtin takes a uint32 that needs to be converted to an
17317 // f64 before being passed to the intrinsic.
17318 Value *Op0 = EmitScalarExpr(E->getArg(0));
17319 Value *Op1 = EmitScalarExpr(E->getArg(1));
17320 Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy);
17321 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
17322 return Builder.CreateCall(F, {Op0, Cast}, "");
17325 case PPC::BI__builtin_ppc_swdiv_nochk:
17326 case PPC::BI__builtin_ppc_swdivs_nochk: {
17327 Value *Op0 = EmitScalarExpr(E->getArg(0));
17328 Value *Op1 = EmitScalarExpr(E->getArg(1));
17329 FastMathFlags FMF = Builder.getFastMathFlags();
17330 Builder.getFastMathFlags().setFast();
17331 Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk");
17332 Builder.getFastMathFlags() &= (FMF);
17333 return FDiv;
17335 case PPC::BI__builtin_ppc_fric:
17336 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17337 *this, E, Intrinsic::rint,
17338 Intrinsic::experimental_constrained_rint))
17339 .getScalarVal();
17340 case PPC::BI__builtin_ppc_frim:
17341 case PPC::BI__builtin_ppc_frims:
17342 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17343 *this, E, Intrinsic::floor,
17344 Intrinsic::experimental_constrained_floor))
17345 .getScalarVal();
17346 case PPC::BI__builtin_ppc_frin:
17347 case PPC::BI__builtin_ppc_frins:
17348 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17349 *this, E, Intrinsic::round,
17350 Intrinsic::experimental_constrained_round))
17351 .getScalarVal();
17352 case PPC::BI__builtin_ppc_frip:
17353 case PPC::BI__builtin_ppc_frips:
17354 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17355 *this, E, Intrinsic::ceil,
17356 Intrinsic::experimental_constrained_ceil))
17357 .getScalarVal();
17358 case PPC::BI__builtin_ppc_friz:
17359 case PPC::BI__builtin_ppc_frizs:
17360 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17361 *this, E, Intrinsic::trunc,
17362 Intrinsic::experimental_constrained_trunc))
17363 .getScalarVal();
17364 case PPC::BI__builtin_ppc_fsqrt:
17365 case PPC::BI__builtin_ppc_fsqrts:
17366 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17367 *this, E, Intrinsic::sqrt,
17368 Intrinsic::experimental_constrained_sqrt))
17369 .getScalarVal();
17370 case PPC::BI__builtin_ppc_test_data_class: {
17371 Value *Op0 = EmitScalarExpr(E->getArg(0));
17372 Value *Op1 = EmitScalarExpr(E->getArg(1));
17373 return Builder.CreateCall(
17374 CGM.getIntrinsic(Intrinsic::ppc_test_data_class, Op0->getType()),
17375 {Op0, Op1}, "test_data_class");
17377 case PPC::BI__builtin_ppc_maxfe: {
17378 Value *Op0 = EmitScalarExpr(E->getArg(0));
17379 Value *Op1 = EmitScalarExpr(E->getArg(1));
17380 Value *Op2 = EmitScalarExpr(E->getArg(2));
17381 Value *Op3 = EmitScalarExpr(E->getArg(3));
17382 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe),
17383 {Op0, Op1, Op2, Op3});
17385 case PPC::BI__builtin_ppc_maxfl: {
17386 Value *Op0 = EmitScalarExpr(E->getArg(0));
17387 Value *Op1 = EmitScalarExpr(E->getArg(1));
17388 Value *Op2 = EmitScalarExpr(E->getArg(2));
17389 Value *Op3 = EmitScalarExpr(E->getArg(3));
17390 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl),
17391 {Op0, Op1, Op2, Op3});
17393 case PPC::BI__builtin_ppc_maxfs: {
17394 Value *Op0 = EmitScalarExpr(E->getArg(0));
17395 Value *Op1 = EmitScalarExpr(E->getArg(1));
17396 Value *Op2 = EmitScalarExpr(E->getArg(2));
17397 Value *Op3 = EmitScalarExpr(E->getArg(3));
17398 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs),
17399 {Op0, Op1, Op2, Op3});
17401 case PPC::BI__builtin_ppc_minfe: {
17402 Value *Op0 = EmitScalarExpr(E->getArg(0));
17403 Value *Op1 = EmitScalarExpr(E->getArg(1));
17404 Value *Op2 = EmitScalarExpr(E->getArg(2));
17405 Value *Op3 = EmitScalarExpr(E->getArg(3));
17406 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe),
17407 {Op0, Op1, Op2, Op3});
17409 case PPC::BI__builtin_ppc_minfl: {
17410 Value *Op0 = EmitScalarExpr(E->getArg(0));
17411 Value *Op1 = EmitScalarExpr(E->getArg(1));
17412 Value *Op2 = EmitScalarExpr(E->getArg(2));
17413 Value *Op3 = EmitScalarExpr(E->getArg(3));
17414 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl),
17415 {Op0, Op1, Op2, Op3});
17417 case PPC::BI__builtin_ppc_minfs: {
17418 Value *Op0 = EmitScalarExpr(E->getArg(0));
17419 Value *Op1 = EmitScalarExpr(E->getArg(1));
17420 Value *Op2 = EmitScalarExpr(E->getArg(2));
17421 Value *Op3 = EmitScalarExpr(E->getArg(3));
17422 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs),
17423 {Op0, Op1, Op2, Op3});
17425 case PPC::BI__builtin_ppc_swdiv:
17426 case PPC::BI__builtin_ppc_swdivs: {
17427 Value *Op0 = EmitScalarExpr(E->getArg(0));
17428 Value *Op1 = EmitScalarExpr(E->getArg(1));
17429 return Builder.CreateFDiv(Op0, Op1, "swdiv");
17431 case PPC::BI__builtin_ppc_set_fpscr_rn:
17432 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_setrnd),
17433 {EmitScalarExpr(E->getArg(0))});
17434 case PPC::BI__builtin_ppc_mffs:
17435 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_readflm));
17439 namespace {
17440 // If \p E is not a null pointer, insert an address space cast to match the
17441 // return type of \p E if necessary.
17442 Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
17443 const CallExpr *E = nullptr) {
17444 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
17445 auto *Call = CGF.Builder.CreateCall(F);
17446 Call->addRetAttr(
17447 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
17448 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
17449 if (!E)
17450 return Call;
17451 QualType BuiltinRetType = E->getType();
17452 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
17453 if (RetTy == Call->getType())
17454 return Call;
17455 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
17458 Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
17459 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
17460 auto *Call = CGF.Builder.CreateCall(F);
17461 Call->addRetAttr(
17462 Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
17463 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
17464 return Call;
17467 // \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
17468 /// Emit code based on Code Object ABI version.
17469 /// COV_4 : Emit code to use dispatch ptr
17470 /// COV_5 : Emit code to use implicitarg ptr
17471 /// COV_NONE : Emit code to load a global variable "llvm.amdgcn.abi.version"
17472 /// and use its value to choose the COV_4 or COV_5 approach. It is used for
17473 /// compiling device libraries in an ABI-agnostic way.
17475 /// Note: "llvm.amdgcn.abi.version" is supposed to be emitted and initialized by
17476 /// clang during compilation of user code.
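// Illustrative summary of the offsets used below (derived from this code,
// not from an external spec): for dimension Index, COV_5 reads an i16 at
// byte 12 + Index * 2 of the implicit kernarg segment, while COV_4 reads an
// i16 at byte 4 + Index * 2 of the HSA kernel_dispatch_packet.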
17477 Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
17478 llvm::LoadInst *LD;
17480 auto Cov = CGF.getTarget().getTargetOpts().CodeObjectVersion;
17482 if (Cov == clang::TargetOptions::COV_None) {
17483 StringRef Name = "llvm.amdgcn.abi.version";
17484 auto *ABIVersionC = CGF.CGM.getModule().getNamedGlobal(Name);
17485 if (!ABIVersionC)
17486 ABIVersionC = new llvm::GlobalVariable(
17487 CGF.CGM.getModule(), CGF.Int32Ty, false,
17488 llvm::GlobalValue::ExternalLinkage, nullptr, Name, nullptr,
17489 llvm::GlobalVariable::NotThreadLocal,
17490 CGF.CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
17492 // This load will be eliminated by the IPSCCP because it is constant
17493 // weak_odr without externally_initialized. Either changing it to weak or
17494 // adding externally_initialized will keep the load.
17495 Value *ABIVersion = CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, ABIVersionC,
17496 CGF.CGM.getIntAlign());
17498 Value *IsCOV5 = CGF.Builder.CreateICmpSGE(
17499 ABIVersion,
17500 llvm::ConstantInt::get(CGF.Int32Ty, clang::TargetOptions::COV_5));
17502 // Indexing the implicit kernarg segment.
17503 Value *ImplicitGEP = CGF.Builder.CreateConstGEP1_32(
17504 CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
17506 // Indexing the HSA kernel_dispatch_packet struct.
17507 Value *DispatchGEP = CGF.Builder.CreateConstGEP1_32(
17508 CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);
17510 auto Result = CGF.Builder.CreateSelect(IsCOV5, ImplicitGEP, DispatchGEP);
17511 LD = CGF.Builder.CreateLoad(
17512 Address(Result, CGF.Int16Ty, CharUnits::fromQuantity(2)));
17513 } else {
17514 Value *GEP = nullptr;
17515 if (Cov == clang::TargetOptions::COV_5) {
17516 // Indexing the implicit kernarg segment.
17517 GEP = CGF.Builder.CreateConstGEP1_32(
17518 CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
17519 } else {
17520 // Indexing the HSA kernel_dispatch_packet struct.
17521 GEP = CGF.Builder.CreateConstGEP1_32(
17522 CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);
17524 LD = CGF.Builder.CreateLoad(
17525 Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
17528 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
17529 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
17530 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
17531 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
17532 LD->setMetadata(llvm::LLVMContext::MD_noundef,
17533 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17534 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
17535 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17536 return LD;
17539 // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
17540 Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
17541 const unsigned XOffset = 12;
17542 auto *DP = EmitAMDGPUDispatchPtr(CGF);
17543 // Indexing the HSA kernel_dispatch_packet struct.
17544 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
17545 auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
17546 auto *LD = CGF.Builder.CreateLoad(
17547 Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
17548 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
17549 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17550 return LD;
17552 } // namespace
17554 // For processing memory ordering and memory scope arguments of various
17555 // amdgcn builtins.
17556 // \p Order takes a C++11-compatible memory-ordering specifier and converts
17557 // it into LLVM's memory ordering using the atomic C ABI mapping, writing the
17558 // result to \p AO. \p Scope takes a const char * scope name and converts it
17559 // into an AMDGCN-specific SyncScopeID, writing it to \p SSID.
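// For example (roughly): __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup")
// maps to AO = SequentiallyConsistent and the "workgroup" sync scope, so the
// caller ends up emitting: fence syncscope("workgroup") seq_cst.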
17560 void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
17561 llvm::AtomicOrdering &AO,
17562 llvm::SyncScope::ID &SSID) {
17563 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
17565 // Map C11/C++11 memory ordering to LLVM memory ordering
17566 assert(llvm::isValidAtomicOrderingCABI(ord));
17567 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
17568 case llvm::AtomicOrderingCABI::acquire:
17569 case llvm::AtomicOrderingCABI::consume:
17570 AO = llvm::AtomicOrdering::Acquire;
17571 break;
17572 case llvm::AtomicOrderingCABI::release:
17573 AO = llvm::AtomicOrdering::Release;
17574 break;
17575 case llvm::AtomicOrderingCABI::acq_rel:
17576 AO = llvm::AtomicOrdering::AcquireRelease;
17577 break;
17578 case llvm::AtomicOrderingCABI::seq_cst:
17579 AO = llvm::AtomicOrdering::SequentiallyConsistent;
17580 break;
17581 case llvm::AtomicOrderingCABI::relaxed:
17582 AO = llvm::AtomicOrdering::Monotonic;
17583 break;
17586 StringRef scp;
17587 llvm::getConstantStringInfo(Scope, scp);
17588 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
17591 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
17592 const CallExpr *E) {
17593 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
17594 llvm::SyncScope::ID SSID;
17595 switch (BuiltinID) {
17596 case AMDGPU::BI__builtin_amdgcn_div_scale:
17597 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
17598 // Translate from the intrinsic's struct return to the builtin's out
17599 // argument.
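// For example (illustrative): for __builtin_amdgcn_div_scalef(x, y, z, &flag)
// the llvm.amdgcn.div.scale call yields a {float, i1} pair; element 0 is
// returned as the result and element 1 is zero-extended and stored to *flag.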
17601 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
17603 llvm::Value *X = EmitScalarExpr(E->getArg(0));
17604 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
17605 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
17607 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
17608 X->getType());
17610 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
17612 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
17613 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
17615 llvm::Type *RealFlagType = FlagOutPtr.getElementType();
17617 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
17618 Builder.CreateStore(FlagExt, FlagOutPtr);
17619 return Result;
17621 case AMDGPU::BI__builtin_amdgcn_div_fmas:
17622 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
17623 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17624 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17625 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17626 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
17628 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
17629 Src0->getType());
17630 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
17631 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
17634 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
17635 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
17636 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
17637 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
17638 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
17639 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
17640 llvm::SmallVector<llvm::Value *, 6> Args;
17641 for (unsigned I = 0; I != E->getNumArgs(); ++I)
17642 Args.push_back(EmitScalarExpr(E->getArg(I)));
17643 assert(Args.size() == 5 || Args.size() == 6);
17644 if (Args.size() == 5)
17645 Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
17646 Function *F =
17647 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
17648 return Builder.CreateCall(F, Args);
17650 case AMDGPU::BI__builtin_amdgcn_div_fixup:
17651 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
17652 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
17653 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
17654 case AMDGPU::BI__builtin_amdgcn_trig_preop:
17655 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
17656 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
17657 case AMDGPU::BI__builtin_amdgcn_rcp:
17658 case AMDGPU::BI__builtin_amdgcn_rcpf:
17659 case AMDGPU::BI__builtin_amdgcn_rcph:
17660 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
17661 case AMDGPU::BI__builtin_amdgcn_sqrt:
17662 case AMDGPU::BI__builtin_amdgcn_sqrtf:
17663 case AMDGPU::BI__builtin_amdgcn_sqrth:
17664 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
17665 case AMDGPU::BI__builtin_amdgcn_rsq:
17666 case AMDGPU::BI__builtin_amdgcn_rsqf:
17667 case AMDGPU::BI__builtin_amdgcn_rsqh:
17668 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
17669 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
17670 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
17671 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
17672 case AMDGPU::BI__builtin_amdgcn_sinf:
17673 case AMDGPU::BI__builtin_amdgcn_sinh:
17674 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
17675 case AMDGPU::BI__builtin_amdgcn_cosf:
17676 case AMDGPU::BI__builtin_amdgcn_cosh:
17677 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
17678 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
17679 return EmitAMDGPUDispatchPtr(*this, E);
17680 case AMDGPU::BI__builtin_amdgcn_logf:
17681 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
17682 case AMDGPU::BI__builtin_amdgcn_exp2f:
17683 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
17684 case AMDGPU::BI__builtin_amdgcn_log_clampf:
17685 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
17686 case AMDGPU::BI__builtin_amdgcn_ldexp:
17687 case AMDGPU::BI__builtin_amdgcn_ldexpf: {
17688 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17689 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17690 llvm::Function *F =
17691 CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
17692 return Builder.CreateCall(F, {Src0, Src1});
17694 case AMDGPU::BI__builtin_amdgcn_ldexph: {
17695 // The raw instruction behaves differently for out-of-bounds exponent
17696 // values (implicit truncation instead of saturating to short_min/short_max).
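// Illustratively, __builtin_amdgcn_ldexph(x, e) becomes a call to the generic
// llvm.ldexp intrinsic mangled on <half, i16>, with the i32 exponent operand
// truncated to i16 first (see below).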
17697 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17698 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17699 llvm::Function *F =
17700 CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Int16Ty});
17701 return Builder.CreateCall(F, {Src0, Builder.CreateTrunc(Src1, Int16Ty)});
17703 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
17704 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
17705 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
17706 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
17707 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
17708 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
17709 Value *Src0 = EmitScalarExpr(E->getArg(0));
17710 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
17711 { Builder.getInt32Ty(), Src0->getType() });
17712 return Builder.CreateCall(F, Src0);
17714 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
17715 Value *Src0 = EmitScalarExpr(E->getArg(0));
17716 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
17717 { Builder.getInt16Ty(), Src0->getType() });
17718 return Builder.CreateCall(F, Src0);
17720 case AMDGPU::BI__builtin_amdgcn_fract:
17721 case AMDGPU::BI__builtin_amdgcn_fractf:
17722 case AMDGPU::BI__builtin_amdgcn_fracth:
17723 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
17724 case AMDGPU::BI__builtin_amdgcn_lerp:
17725 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
17726 case AMDGPU::BI__builtin_amdgcn_ubfe:
17727 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
17728 case AMDGPU::BI__builtin_amdgcn_sbfe:
17729 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
17730 case AMDGPU::BI__builtin_amdgcn_ballot_w32:
17731 case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
17732 llvm::Type *ResultType = ConvertType(E->getType());
17733 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
17734 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
17735 return Builder.CreateCall(F, { Src });
17737 case AMDGPU::BI__builtin_amdgcn_uicmp:
17738 case AMDGPU::BI__builtin_amdgcn_uicmpl:
17739 case AMDGPU::BI__builtin_amdgcn_sicmp:
17740 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
17741 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17742 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17743 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17745 // FIXME-GFX10: How should the 32-bit mask be handled?
17746 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
17747 { Builder.getInt64Ty(), Src0->getType() });
17748 return Builder.CreateCall(F, { Src0, Src1, Src2 });
17750 case AMDGPU::BI__builtin_amdgcn_fcmp:
17751 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
17752 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17753 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17754 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17756 // FIXME-GFX10: How should the 32-bit mask be handled?
17757 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
17758 { Builder.getInt64Ty(), Src0->getType() });
17759 return Builder.CreateCall(F, { Src0, Src1, Src2 });
17761 case AMDGPU::BI__builtin_amdgcn_class:
17762 case AMDGPU::BI__builtin_amdgcn_classf:
17763 case AMDGPU::BI__builtin_amdgcn_classh:
17764 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
17765 case AMDGPU::BI__builtin_amdgcn_fmed3f:
17766 case AMDGPU::BI__builtin_amdgcn_fmed3h:
17767 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
17768 case AMDGPU::BI__builtin_amdgcn_ds_append:
17769 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
17770 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
17771 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
17772 Value *Src0 = EmitScalarExpr(E->getArg(0));
17773 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
17774 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
17776 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
17777 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
17778 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
17779 Intrinsic::ID Intrin;
17780 switch (BuiltinID) {
17781 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
17782 Intrin = Intrinsic::amdgcn_ds_fadd;
17783 break;
17784 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
17785 Intrin = Intrinsic::amdgcn_ds_fmin;
17786 break;
17787 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
17788 Intrin = Intrinsic::amdgcn_ds_fmax;
17789 break;
17791 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17792 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17793 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17794 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
17795 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
17796 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
17797 llvm::FunctionType *FTy = F->getFunctionType();
17798 llvm::Type *PTy = FTy->getParamType(0);
17799 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
17800 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
17802 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
17803 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
17804 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
17805 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
17806 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
17807 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
17808 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
17809 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
17810 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
17811 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: {
17812 Intrinsic::ID IID;
17813 llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
17814 switch (BuiltinID) {
17815 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
17816 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17817 IID = Intrinsic::amdgcn_global_atomic_fadd;
17818 break;
17819 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
17820 ArgTy = llvm::FixedVectorType::get(
17821 llvm::Type::getHalfTy(getLLVMContext()), 2);
17822 IID = Intrinsic::amdgcn_global_atomic_fadd;
17823 break;
17824 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
17825 IID = Intrinsic::amdgcn_global_atomic_fadd;
17826 break;
17827 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
17828 IID = Intrinsic::amdgcn_global_atomic_fmin;
17829 break;
17830 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
17831 IID = Intrinsic::amdgcn_global_atomic_fmax;
17832 break;
17833 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
17834 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17835 break;
17836 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
17837 IID = Intrinsic::amdgcn_flat_atomic_fmin;
17838 break;
17839 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
17840 IID = Intrinsic::amdgcn_flat_atomic_fmax;
17841 break;
17842 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
17843 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17844 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17845 break;
17846 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
17847 ArgTy = llvm::FixedVectorType::get(
17848 llvm::Type::getHalfTy(getLLVMContext()), 2);
17849 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17850 break;
17852 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17853 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17854 llvm::Function *F =
17855 CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
17856 return Builder.CreateCall(F, {Addr, Val});
17858 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
17859 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
17860 Intrinsic::ID IID;
17861 switch (BuiltinID) {
17862 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
17863 IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
17864 break;
17865 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
17866 IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
17867 break;
17869 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17870 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17871 llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
17872 return Builder.CreateCall(F, {Addr, Val});
17874 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
17875 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
17876 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
17877 Intrinsic::ID IID;
17878 llvm::Type *ArgTy;
17879 switch (BuiltinID) {
17880 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
17881 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17882 IID = Intrinsic::amdgcn_ds_fadd;
17883 break;
17884 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
17885 ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
17886 IID = Intrinsic::amdgcn_ds_fadd;
17887 break;
17888 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
17889 ArgTy = llvm::FixedVectorType::get(
17890 llvm::Type::getHalfTy(getLLVMContext()), 2);
17891 IID = Intrinsic::amdgcn_ds_fadd;
17892 break;
17894 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17895 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17896 llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
17897 llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
17898 llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
17899 llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
17900 llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
17901 return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
17903 case AMDGPU::BI__builtin_amdgcn_read_exec:
17904 return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, false);
17905 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
17906 return EmitAMDGCNBallotForExec(*this, E, Int32Ty, Int32Ty, false);
17907 case AMDGPU::BI__builtin_amdgcn_read_exec_hi:
17908 return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, true);
17909 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
17910 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
17911 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
17912 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
17913 llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
17914 llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
17915 llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
17916 llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
17917 llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
17918 llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
17920 // The builtins take these arguments as vec4 where the last element is
17921 // ignored. The intrinsic takes them as vec3.
17922 RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
17923 ArrayRef<int>{0, 1, 2});
17924 RayDir =
17925 Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
17926 RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
17927 ArrayRef<int>{0, 1, 2});
17929 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
17930 {NodePtr->getType(), RayDir->getType()});
17931 return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
17932 RayInverseDir, TextureDescr});
17935 case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn: {
17936 SmallVector<Value *, 4> Args;
17937 for (int i = 0, e = E->getNumArgs(); i != e; ++i)
17938 Args.push_back(EmitScalarExpr(E->getArg(i)));
17940 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ds_bvh_stack_rtn);
17941 Value *Call = Builder.CreateCall(F, Args);
17942 Value *Rtn = Builder.CreateExtractValue(Call, 0);
17943 Value *A = Builder.CreateExtractValue(Call, 1);
17944 llvm::Type *RetTy = ConvertType(E->getType());
17945 Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
17946 (uint64_t)0);
17947 return Builder.CreateInsertElement(I0, A, 1);
17950 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
17951 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
17952 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
17953 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
17954 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
17955 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
17956 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
17957 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
17958 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
17959 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
17960 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
17961 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: {
17963 // These operations perform a matrix multiplication and accumulation of
17964 // the form:
17965 // D = A * B + C
17966 // The return type always matches the type of matrix C.
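// For example (mirroring the mapping below),
// __builtin_amdgcn_wmma_f32_16x16x16_f16_w32 selects
// Intrinsic::amdgcn_wmma_f32_16x16x16_f16 and mangles it on the type of
// argument 2 (matrix C), which is also the call's result type.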
17967 unsigned ArgForMatchingRetType;
17968 unsigned BuiltinWMMAOp;
17970 switch (BuiltinID) {
17971 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
17972 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
17973 ArgForMatchingRetType = 2;
17974 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
17975 break;
17976 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
17977 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
17978 ArgForMatchingRetType = 2;
17979 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
17980 break;
17981 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
17982 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
17983 ArgForMatchingRetType = 2;
17984 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
17985 break;
17986 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
17987 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
17988 ArgForMatchingRetType = 2;
17989 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
17990 break;
17991 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
17992 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
17993 ArgForMatchingRetType = 4;
17994 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
17995 break;
17996 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
17997 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
17998 ArgForMatchingRetType = 4;
17999 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
18000 break;
18003 SmallVector<Value *, 6> Args;
18004 for (int i = 0, e = E->getNumArgs(); i != e; ++i)
18005 Args.push_back(EmitScalarExpr(E->getArg(i)));
18007 Function *F = CGM.getIntrinsic(BuiltinWMMAOp,
18008 {Args[ArgForMatchingRetType]->getType()});
18010 return Builder.CreateCall(F, Args);
18013 // amdgcn workitem
18014 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
18015 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
18016 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
18017 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
18018 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
18019 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
18021 // amdgcn workgroup size
18022 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
18023 return EmitAMDGPUWorkGroupSize(*this, 0);
18024 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
18025 return EmitAMDGPUWorkGroupSize(*this, 1);
18026 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
18027 return EmitAMDGPUWorkGroupSize(*this, 2);
18029 // amdgcn grid size
18030 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
18031 return EmitAMDGPUGridSize(*this, 0);
18032 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
18033 return EmitAMDGPUGridSize(*this, 1);
18034 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
18035 return EmitAMDGPUGridSize(*this, 2);
18037 // r600 intrinsics
18038 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
18039 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
18040 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
18041 case AMDGPU::BI__builtin_r600_read_tidig_x:
18042 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
18043 case AMDGPU::BI__builtin_r600_read_tidig_y:
18044 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
18045 case AMDGPU::BI__builtin_r600_read_tidig_z:
18046 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
18047 case AMDGPU::BI__builtin_amdgcn_alignbit: {
18048 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
18049 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
18050 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
18051 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
18052 return Builder.CreateCall(F, { Src0, Src1, Src2 });
18054 case AMDGPU::BI__builtin_amdgcn_fence: {
18055 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
18056 EmitScalarExpr(E->getArg(1)), AO, SSID);
18057 return Builder.CreateFence(AO, SSID);
18059 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
18060 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
18061 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
18062 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
18063 llvm::AtomicRMWInst::BinOp BinOp;
18064 switch (BuiltinID) {
18065 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
18066 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
18067 BinOp = llvm::AtomicRMWInst::UIncWrap;
18068 break;
18069 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
18070 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
18071 BinOp = llvm::AtomicRMWInst::UDecWrap;
18072 break;
18075 Value *Ptr = EmitScalarExpr(E->getArg(0));
18076 Value *Val = EmitScalarExpr(E->getArg(1));
18078 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
18079 EmitScalarExpr(E->getArg(3)), AO, SSID);
18081 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
18082 bool Volatile =
18083 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
18085 llvm::AtomicRMWInst *RMW =
18086 Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
18087 if (Volatile)
18088 RMW->setVolatile(true);
18089 return RMW;
18091 case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
18092 case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
18093 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
18094 llvm::Type *ResultType = ConvertType(E->getType());
18095 // s_sendmsg_rtn is mangled using return type only.
18096 Function *F =
18097 CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
18098 return Builder.CreateCall(F, {Arg});
18100 default:
18101 return nullptr;
18105 /// Handle a SystemZ function in which the final argument is a pointer
18106 /// to an int that receives the post-instruction CC value. At the LLVM level
18107 /// this is represented as a function that returns a {result, cc} pair.
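/// For example (illustrative): __builtin_s390_vceqbs(a, b, &cc) is emitted as
/// a call to llvm.s390.vceqbs returning {result, cc}; element 1 is stored to
/// *cc and element 0 is returned to the caller.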
18108 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
18109 unsigned IntrinsicID,
18110 const CallExpr *E) {
18111 unsigned NumArgs = E->getNumArgs() - 1;
18112 SmallVector<Value *, 8> Args(NumArgs);
18113 for (unsigned I = 0; I < NumArgs; ++I)
18114 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
18115 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
18116 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
18117 Value *Call = CGF.Builder.CreateCall(F, Args);
18118 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
18119 CGF.Builder.CreateStore(CC, CCPtr);
18120 return CGF.Builder.CreateExtractValue(Call, 0);
18123 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
18124 const CallExpr *E) {
18125 switch (BuiltinID) {
18126 case SystemZ::BI__builtin_tbegin: {
18127 Value *TDB = EmitScalarExpr(E->getArg(0));
18128 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
18129 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
18130 return Builder.CreateCall(F, {TDB, Control});
18132 case SystemZ::BI__builtin_tbegin_nofloat: {
18133 Value *TDB = EmitScalarExpr(E->getArg(0));
18134 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
18135 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
18136 return Builder.CreateCall(F, {TDB, Control});
18138 case SystemZ::BI__builtin_tbeginc: {
18139 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
18140 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
18141 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
18142 return Builder.CreateCall(F, {TDB, Control});
18144 case SystemZ::BI__builtin_tabort: {
18145 Value *Data = EmitScalarExpr(E->getArg(0));
18146 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
18147 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
18149 case SystemZ::BI__builtin_non_tx_store: {
18150 Value *Address = EmitScalarExpr(E->getArg(0));
18151 Value *Data = EmitScalarExpr(E->getArg(1));
18152 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
18153 return Builder.CreateCall(F, {Data, Address});
18156 // Vector builtins. Note that most vector builtins are mapped automatically
18157 // to target-specific LLVM intrinsics. The ones handled specially here can
18158 // be represented via standard LLVM IR, which is preferable because it
18159 // enables common LLVM optimizations.
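// For example (see the vpopct* cases just below), __builtin_s390_vpopctb is
// emitted as a generic llvm.ctpop call on the vector type rather than a
// target-specific intrinsic, so standard IR transforms can reason about it.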
18161 case SystemZ::BI__builtin_s390_vpopctb:
18162 case SystemZ::BI__builtin_s390_vpopcth:
18163 case SystemZ::BI__builtin_s390_vpopctf:
18164 case SystemZ::BI__builtin_s390_vpopctg: {
18165 llvm::Type *ResultType = ConvertType(E->getType());
18166 Value *X = EmitScalarExpr(E->getArg(0));
18167 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
18168 return Builder.CreateCall(F, X);
18171 case SystemZ::BI__builtin_s390_vclzb:
18172 case SystemZ::BI__builtin_s390_vclzh:
18173 case SystemZ::BI__builtin_s390_vclzf:
18174 case SystemZ::BI__builtin_s390_vclzg: {
18175 llvm::Type *ResultType = ConvertType(E->getType());
18176 Value *X = EmitScalarExpr(E->getArg(0));
18177 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
18178 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
18179 return Builder.CreateCall(F, {X, Undef});
18182 case SystemZ::BI__builtin_s390_vctzb:
18183 case SystemZ::BI__builtin_s390_vctzh:
18184 case SystemZ::BI__builtin_s390_vctzf:
18185 case SystemZ::BI__builtin_s390_vctzg: {
18186 llvm::Type *ResultType = ConvertType(E->getType());
18187 Value *X = EmitScalarExpr(E->getArg(0));
18188 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
18189 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
18190 return Builder.CreateCall(F, {X, Undef});
18193 case SystemZ::BI__builtin_s390_vfsqsb:
18194 case SystemZ::BI__builtin_s390_vfsqdb: {
18195 llvm::Type *ResultType = ConvertType(E->getType());
18196 Value *X = EmitScalarExpr(E->getArg(0));
18197 if (Builder.getIsFPConstrained()) {
18198 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
18199 return Builder.CreateConstrainedFPCall(F, { X });
18200 } else {
18201 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
18202 return Builder.CreateCall(F, X);
18205 case SystemZ::BI__builtin_s390_vfmasb:
18206 case SystemZ::BI__builtin_s390_vfmadb: {
18207 llvm::Type *ResultType = ConvertType(E->getType());
18208 Value *X = EmitScalarExpr(E->getArg(0));
18209 Value *Y = EmitScalarExpr(E->getArg(1));
18210 Value *Z = EmitScalarExpr(E->getArg(2));
18211 if (Builder.getIsFPConstrained()) {
18212 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
18213 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
18214 } else {
18215 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
18216 return Builder.CreateCall(F, {X, Y, Z});
18219 case SystemZ::BI__builtin_s390_vfmssb:
18220 case SystemZ::BI__builtin_s390_vfmsdb: {
18221 llvm::Type *ResultType = ConvertType(E->getType());
18222 Value *X = EmitScalarExpr(E->getArg(0));
18223 Value *Y = EmitScalarExpr(E->getArg(1));
18224 Value *Z = EmitScalarExpr(E->getArg(2));
18225 if (Builder.getIsFPConstrained()) {
18226 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
18227 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
18228 } else {
18229 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
18230 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
18233 case SystemZ::BI__builtin_s390_vfnmasb:
18234 case SystemZ::BI__builtin_s390_vfnmadb: {
18235 llvm::Type *ResultType = ConvertType(E->getType());
18236 Value *X = EmitScalarExpr(E->getArg(0));
18237 Value *Y = EmitScalarExpr(E->getArg(1));
18238 Value *Z = EmitScalarExpr(E->getArg(2));
18239 if (Builder.getIsFPConstrained()) {
18240 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
18241 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
18242 } else {
18243 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
18244 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
18247 case SystemZ::BI__builtin_s390_vfnmssb:
18248 case SystemZ::BI__builtin_s390_vfnmsdb: {
18249 llvm::Type *ResultType = ConvertType(E->getType());
18250 Value *X = EmitScalarExpr(E->getArg(0));
18251 Value *Y = EmitScalarExpr(E->getArg(1));
18252 Value *Z = EmitScalarExpr(E->getArg(2));
18253 if (Builder.getIsFPConstrained()) {
18254 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
18255 Value *NegZ = Builder.CreateFNeg(Z, "sub");
18256 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
18257 } else {
18258 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
18259 Value *NegZ = Builder.CreateFNeg(Z, "neg");
18260 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
18263 case SystemZ::BI__builtin_s390_vflpsb:
18264 case SystemZ::BI__builtin_s390_vflpdb: {
18265 llvm::Type *ResultType = ConvertType(E->getType());
18266 Value *X = EmitScalarExpr(E->getArg(0));
18267 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
18268 return Builder.CreateCall(F, X);
18270 case SystemZ::BI__builtin_s390_vflnsb:
18271 case SystemZ::BI__builtin_s390_vflndb: {
18272 llvm::Type *ResultType = ConvertType(E->getType());
18273 Value *X = EmitScalarExpr(E->getArg(0));
18274 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
18275 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
18277 case SystemZ::BI__builtin_s390_vfisb:
18278 case SystemZ::BI__builtin_s390_vfidb: {
18279 llvm::Type *ResultType = ConvertType(E->getType());
18280 Value *X = EmitScalarExpr(E->getArg(0));
18281 // Constant-fold the M4 and M5 mask arguments.
18282 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
18283 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
18284 // Check whether this instance can be represented via an LLVM standard
18285 // intrinsic. We only support some combinations of M4 and M5.
18286 Intrinsic::ID ID = Intrinsic::not_intrinsic;
18287 Intrinsic::ID CI;
18288 switch (M4.getZExtValue()) {
18289 default: break;
18290 case 0: // IEEE-inexact exception allowed
18291 switch (M5.getZExtValue()) {
18292 default: break;
18293 case 0: ID = Intrinsic::rint;
18294 CI = Intrinsic::experimental_constrained_rint; break;
18296 break;
18297 case 4: // IEEE-inexact exception suppressed
18298 switch (M5.getZExtValue()) {
18299 default: break;
18300 case 0: ID = Intrinsic::nearbyint;
18301 CI = Intrinsic::experimental_constrained_nearbyint; break;
18302 case 1: ID = Intrinsic::round;
18303 CI = Intrinsic::experimental_constrained_round; break;
18304 case 5: ID = Intrinsic::trunc;
18305 CI = Intrinsic::experimental_constrained_trunc; break;
18306 case 6: ID = Intrinsic::ceil;
18307 CI = Intrinsic::experimental_constrained_ceil; break;
18308 case 7: ID = Intrinsic::floor;
18309 CI = Intrinsic::experimental_constrained_floor; break;
18311 break;
18313 if (ID != Intrinsic::not_intrinsic) {
18314 if (Builder.getIsFPConstrained()) {
18315 Function *F = CGM.getIntrinsic(CI, ResultType);
18316 return Builder.CreateConstrainedFPCall(F, X);
18317 } else {
18318 Function *F = CGM.getIntrinsic(ID, ResultType);
18319 return Builder.CreateCall(F, X);
18322 switch (BuiltinID) { // FIXME: constrained version?
18323 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
18324 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
18325 default: llvm_unreachable("Unknown BuiltinID");
18327 Function *F = CGM.getIntrinsic(ID);
18328 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
18329 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
18330 return Builder.CreateCall(F, {X, M4Value, M5Value});
18332 case SystemZ::BI__builtin_s390_vfmaxsb:
18333 case SystemZ::BI__builtin_s390_vfmaxdb: {
18334 llvm::Type *ResultType = ConvertType(E->getType());
18335 Value *X = EmitScalarExpr(E->getArg(0));
18336 Value *Y = EmitScalarExpr(E->getArg(1));
18337 // Constant-fold the M4 mask argument.
18338 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
18339 // Check whether this instance can be represented via an LLVM standard
18340 // intrinsic. We only support some values of M4.
18341 Intrinsic::ID ID = Intrinsic::not_intrinsic;
18342 Intrinsic::ID CI;
18343 switch (M4.getZExtValue()) {
18344 default: break;
18345 case 4: ID = Intrinsic::maxnum;
18346 CI = Intrinsic::experimental_constrained_maxnum; break;
18348 if (ID != Intrinsic::not_intrinsic) {
18349 if (Builder.getIsFPConstrained()) {
18350 Function *F = CGM.getIntrinsic(CI, ResultType);
18351 return Builder.CreateConstrainedFPCall(F, {X, Y});
18352 } else {
18353 Function *F = CGM.getIntrinsic(ID, ResultType);
18354 return Builder.CreateCall(F, {X, Y});
18357 switch (BuiltinID) {
18358 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
18359 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
18360 default: llvm_unreachable("Unknown BuiltinID");
18362 Function *F = CGM.getIntrinsic(ID);
18363 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
18364 return Builder.CreateCall(F, {X, Y, M4Value});
18366 case SystemZ::BI__builtin_s390_vfminsb:
18367 case SystemZ::BI__builtin_s390_vfmindb: {
18368 llvm::Type *ResultType = ConvertType(E->getType());
18369 Value *X = EmitScalarExpr(E->getArg(0));
18370 Value *Y = EmitScalarExpr(E->getArg(1));
18371 // Constant-fold the M4 mask argument.
18372 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
18373 // Check whether this instance can be represented via an LLVM standard
18374 // intrinsic. We only support some values of M4.
18375 Intrinsic::ID ID = Intrinsic::not_intrinsic;
18376 Intrinsic::ID CI;
18377 switch (M4.getZExtValue()) {
18378 default: break;
18379 case 4: ID = Intrinsic::minnum;
18380 CI = Intrinsic::experimental_constrained_minnum; break;
18382 if (ID != Intrinsic::not_intrinsic) {
18383 if (Builder.getIsFPConstrained()) {
18384 Function *F = CGM.getIntrinsic(CI, ResultType);
18385 return Builder.CreateConstrainedFPCall(F, {X, Y});
18386 } else {
18387 Function *F = CGM.getIntrinsic(ID, ResultType);
18388 return Builder.CreateCall(F, {X, Y});
18391 switch (BuiltinID) {
18392 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
18393 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
18394 default: llvm_unreachable("Unknown BuiltinID");
18396 Function *F = CGM.getIntrinsic(ID);
18397 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
18398 return Builder.CreateCall(F, {X, Y, M4Value});
18401 case SystemZ::BI__builtin_s390_vlbrh:
18402 case SystemZ::BI__builtin_s390_vlbrf:
18403 case SystemZ::BI__builtin_s390_vlbrg: {
18404 llvm::Type *ResultType = ConvertType(E->getType());
18405 Value *X = EmitScalarExpr(E->getArg(0));
18406 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
18407 return Builder.CreateCall(F, X);
18410 // Vector intrinsics that output the post-instruction CC value.
18412 #define INTRINSIC_WITH_CC(NAME) \
18413 case SystemZ::BI__builtin_##NAME: \
18414 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
18416 INTRINSIC_WITH_CC(s390_vpkshs);
18417 INTRINSIC_WITH_CC(s390_vpksfs);
18418 INTRINSIC_WITH_CC(s390_vpksgs);
18420 INTRINSIC_WITH_CC(s390_vpklshs);
18421 INTRINSIC_WITH_CC(s390_vpklsfs);
18422 INTRINSIC_WITH_CC(s390_vpklsgs);
18424 INTRINSIC_WITH_CC(s390_vceqbs);
18425 INTRINSIC_WITH_CC(s390_vceqhs);
18426 INTRINSIC_WITH_CC(s390_vceqfs);
18427 INTRINSIC_WITH_CC(s390_vceqgs);
18429 INTRINSIC_WITH_CC(s390_vchbs);
18430 INTRINSIC_WITH_CC(s390_vchhs);
18431 INTRINSIC_WITH_CC(s390_vchfs);
18432 INTRINSIC_WITH_CC(s390_vchgs);
18434 INTRINSIC_WITH_CC(s390_vchlbs);
18435 INTRINSIC_WITH_CC(s390_vchlhs);
18436 INTRINSIC_WITH_CC(s390_vchlfs);
18437 INTRINSIC_WITH_CC(s390_vchlgs);
18439 INTRINSIC_WITH_CC(s390_vfaebs);
18440 INTRINSIC_WITH_CC(s390_vfaehs);
18441 INTRINSIC_WITH_CC(s390_vfaefs);
18443 INTRINSIC_WITH_CC(s390_vfaezbs);
18444 INTRINSIC_WITH_CC(s390_vfaezhs);
18445 INTRINSIC_WITH_CC(s390_vfaezfs);
18447 INTRINSIC_WITH_CC(s390_vfeebs);
18448 INTRINSIC_WITH_CC(s390_vfeehs);
18449 INTRINSIC_WITH_CC(s390_vfeefs);
18451 INTRINSIC_WITH_CC(s390_vfeezbs);
18452 INTRINSIC_WITH_CC(s390_vfeezhs);
18453 INTRINSIC_WITH_CC(s390_vfeezfs);
18455 INTRINSIC_WITH_CC(s390_vfenebs);
18456 INTRINSIC_WITH_CC(s390_vfenehs);
18457 INTRINSIC_WITH_CC(s390_vfenefs);
18459 INTRINSIC_WITH_CC(s390_vfenezbs);
18460 INTRINSIC_WITH_CC(s390_vfenezhs);
18461 INTRINSIC_WITH_CC(s390_vfenezfs);
18463 INTRINSIC_WITH_CC(s390_vistrbs);
18464 INTRINSIC_WITH_CC(s390_vistrhs);
18465 INTRINSIC_WITH_CC(s390_vistrfs);
18467 INTRINSIC_WITH_CC(s390_vstrcbs);
18468 INTRINSIC_WITH_CC(s390_vstrchs);
18469 INTRINSIC_WITH_CC(s390_vstrcfs);
18471 INTRINSIC_WITH_CC(s390_vstrczbs);
18472 INTRINSIC_WITH_CC(s390_vstrczhs);
18473 INTRINSIC_WITH_CC(s390_vstrczfs);
18475 INTRINSIC_WITH_CC(s390_vfcesbs);
18476 INTRINSIC_WITH_CC(s390_vfcedbs);
18477 INTRINSIC_WITH_CC(s390_vfchsbs);
18478 INTRINSIC_WITH_CC(s390_vfchdbs);
18479 INTRINSIC_WITH_CC(s390_vfchesbs);
18480 INTRINSIC_WITH_CC(s390_vfchedbs);
18482 INTRINSIC_WITH_CC(s390_vftcisb);
18483 INTRINSIC_WITH_CC(s390_vftcidb);
18485 INTRINSIC_WITH_CC(s390_vstrsb);
18486 INTRINSIC_WITH_CC(s390_vstrsh);
18487 INTRINSIC_WITH_CC(s390_vstrsf);
18489 INTRINSIC_WITH_CC(s390_vstrszb);
18490 INTRINSIC_WITH_CC(s390_vstrszh);
18491 INTRINSIC_WITH_CC(s390_vstrszf);
18493 #undef INTRINSIC_WITH_CC
18495 default:
18496 return nullptr;
18500 namespace {
18501 // Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
18502 struct NVPTXMmaLdstInfo {
18503 unsigned NumResults; // Number of elements to load/store
18504 // Intrinsic IDs for row/col variants. 0 if the particular layout is unsupported.
18505 unsigned IID_col;
18506 unsigned IID_row;
18509 #define MMA_INTR(geom_op_type, layout) \
18510 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
18511 #define MMA_LDST(n, geom_op_type) \
18512 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
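// Illustrative expansion: MMA_LDST(8, m16n16k16_load_a_f16) produces
// { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//   Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }.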
18514 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
18515 switch (BuiltinID) {
18516 // FP MMA loads
18517 case NVPTX::BI__hmma_m16n16k16_ld_a:
18518 return MMA_LDST(8, m16n16k16_load_a_f16);
18519 case NVPTX::BI__hmma_m16n16k16_ld_b:
18520 return MMA_LDST(8, m16n16k16_load_b_f16);
18521 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
18522 return MMA_LDST(4, m16n16k16_load_c_f16);
18523 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
18524 return MMA_LDST(8, m16n16k16_load_c_f32);
18525 case NVPTX::BI__hmma_m32n8k16_ld_a:
18526 return MMA_LDST(8, m32n8k16_load_a_f16);
18527 case NVPTX::BI__hmma_m32n8k16_ld_b:
18528 return MMA_LDST(8, m32n8k16_load_b_f16);
18529 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
18530 return MMA_LDST(4, m32n8k16_load_c_f16);
18531 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
18532 return MMA_LDST(8, m32n8k16_load_c_f32);
18533 case NVPTX::BI__hmma_m8n32k16_ld_a:
18534 return MMA_LDST(8, m8n32k16_load_a_f16);
18535 case NVPTX::BI__hmma_m8n32k16_ld_b:
18536 return MMA_LDST(8, m8n32k16_load_b_f16);
18537 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
18538 return MMA_LDST(4, m8n32k16_load_c_f16);
18539 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
18540 return MMA_LDST(8, m8n32k16_load_c_f32);
18542 // Integer MMA loads
18543 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
18544 return MMA_LDST(2, m16n16k16_load_a_s8);
18545 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
18546 return MMA_LDST(2, m16n16k16_load_a_u8);
18547 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
18548 return MMA_LDST(2, m16n16k16_load_b_s8);
18549 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
18550 return MMA_LDST(2, m16n16k16_load_b_u8);
18551 case NVPTX::BI__imma_m16n16k16_ld_c:
18552 return MMA_LDST(8, m16n16k16_load_c_s32);
18553 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
18554 return MMA_LDST(4, m32n8k16_load_a_s8);
18555 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
18556 return MMA_LDST(4, m32n8k16_load_a_u8);
18557 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
18558 return MMA_LDST(1, m32n8k16_load_b_s8);
18559 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
18560 return MMA_LDST(1, m32n8k16_load_b_u8);
18561 case NVPTX::BI__imma_m32n8k16_ld_c:
18562 return MMA_LDST(8, m32n8k16_load_c_s32);
18563 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
18564 return MMA_LDST(1, m8n32k16_load_a_s8);
18565 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
18566 return MMA_LDST(1, m8n32k16_load_a_u8);
18567 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
18568 return MMA_LDST(4, m8n32k16_load_b_s8);
18569 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
18570 return MMA_LDST(4, m8n32k16_load_b_u8);
18571 case NVPTX::BI__imma_m8n32k16_ld_c:
18572 return MMA_LDST(8, m8n32k16_load_c_s32);
18574 // Sub-integer MMA loads.
18575 // Only row/col layout is supported by A/B fragments.
18576 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
18577 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
18578 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
18579 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
18580 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
18581 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
18582 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
18583 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
18584 case NVPTX::BI__imma_m8n8k32_ld_c:
18585 return MMA_LDST(2, m8n8k32_load_c_s32);
18586 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
18587 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
18588 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
18589 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
18590 case NVPTX::BI__bmma_m8n8k128_ld_c:
18591 return MMA_LDST(2, m8n8k128_load_c_s32);
18593 // Double MMA loads
18594 case NVPTX::BI__dmma_m8n8k4_ld_a:
18595 return MMA_LDST(1, m8n8k4_load_a_f64);
18596 case NVPTX::BI__dmma_m8n8k4_ld_b:
18597 return MMA_LDST(1, m8n8k4_load_b_f64);
18598 case NVPTX::BI__dmma_m8n8k4_ld_c:
18599 return MMA_LDST(2, m8n8k4_load_c_f64);
18601 // Alternate float MMA loads
18602 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
18603 return MMA_LDST(4, m16n16k16_load_a_bf16);
18604 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
18605 return MMA_LDST(4, m16n16k16_load_b_bf16);
18606 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
18607 return MMA_LDST(2, m8n32k16_load_a_bf16);
18608 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
18609 return MMA_LDST(8, m8n32k16_load_b_bf16);
18610 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
18611 return MMA_LDST(8, m32n8k16_load_a_bf16);
18612 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
18613 return MMA_LDST(2, m32n8k16_load_b_bf16);
18614 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
18615 return MMA_LDST(4, m16n16k8_load_a_tf32);
18616 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
18617 return MMA_LDST(4, m16n16k8_load_b_tf32);
18618 case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
18619 return MMA_LDST(8, m16n16k8_load_c_f32);
18621 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
18622 // PTX and LLVM IR, where stores always use fragment D, NVCC builtins always
18623 // use fragment C for both loads and stores.
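// For example, the __hmma_m16n16k16_st_c_f16 builtin (named with fragment C)
// maps to the m16n16k16_store_d_f16 intrinsics below (named with fragment D).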
18624 // FP MMA stores.
18625 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
18626 return MMA_LDST(4, m16n16k16_store_d_f16);
18627 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
18628 return MMA_LDST(8, m16n16k16_store_d_f32);
18629 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
18630 return MMA_LDST(4, m32n8k16_store_d_f16);
18631 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
18632 return MMA_LDST(8, m32n8k16_store_d_f32);
18633 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
18634 return MMA_LDST(4, m8n32k16_store_d_f16);
18635 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
18636 return MMA_LDST(8, m8n32k16_store_d_f32);
18638 // Integer and sub-integer MMA stores.
18639 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
18640 // name, integer loads/stores use LLVM's i32.
18641 case NVPTX::BI__imma_m16n16k16_st_c_i32:
18642 return MMA_LDST(8, m16n16k16_store_d_s32);
18643 case NVPTX::BI__imma_m32n8k16_st_c_i32:
18644 return MMA_LDST(8, m32n8k16_store_d_s32);
18645 case NVPTX::BI__imma_m8n32k16_st_c_i32:
18646 return MMA_LDST(8, m8n32k16_store_d_s32);
18647 case NVPTX::BI__imma_m8n8k32_st_c_i32:
18648 return MMA_LDST(2, m8n8k32_store_d_s32);
18649 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
18650 return MMA_LDST(2, m8n8k128_store_d_s32);
18652 // Double MMA store
18653 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
18654 return MMA_LDST(2, m8n8k4_store_d_f64);
18656 // Alternate float MMA store
18657 case NVPTX::BI__mma_m16n16k8_st_c_f32:
18658 return MMA_LDST(8, m16n16k8_store_d_f32);
18660 default:
18661 llvm_unreachable("Unknown MMA builtin");
18664 #undef MMA_LDST
18665 #undef MMA_INTR
18668 struct NVPTXMmaInfo {
18669 unsigned NumEltsA;
18670 unsigned NumEltsB;
18671 unsigned NumEltsC;
18672 unsigned NumEltsD;
18674 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
18675 // over 'col' for layout. The index of non-satf variants is expected to match
18676 // the undocumented layout constants used by CUDA's mma.hpp.
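// For example (following getMMAIntrinsic below), Layout == 1 with Satf == true
// selects Variants[5], which for MMA_SATF_VARIANTS is the row/col .satfinite
// variant.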
18677 std::array<unsigned, 8> Variants;
18679 unsigned getMMAIntrinsic(int Layout, bool Satf) {
18680 unsigned Index = Layout + 4 * Satf;
18681 if (Index >= Variants.size())
18682 return 0;
18683 return Variants[Index];
18688 // Returns the intrinsic that matches Layout and Satf for valid combinations
18689 // of Layout and Satf, and 0 otherwise.
18689 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
18690 // clang-format off
18691 #define MMA_VARIANTS(geom, type) \
18692 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
18693 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
18694 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
18695 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
18696 #define MMA_SATF_VARIANTS(geom, type) \
18697 MMA_VARIANTS(geom, type), \
18698 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
18699 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
18700 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
18701 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
18702 // Sub-integer MMA only supports row.col layout.
18703 #define MMA_VARIANTS_I4(geom, type) \
18704 0, \
18705 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
18706 0, \
18707 0, \
18708 0, \
18709 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
18710 0, \
18712 // b1 MMA does not support .satfinite.
18713 #define MMA_VARIANTS_B1_XOR(geom, type) \
18714 0, \
18715 Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
18716 0, \
18717 0, \
18718 0, \
18719 0, \
18720 0, \
18722 #define MMA_VARIANTS_B1_AND(geom, type) \
18723 0, \
18724 Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
18725 0, \
18726 0, \
18727 0, \
18728 0, \
18729 0, \
18731 // clang-format on
18732 switch (BuiltinID) {
18733 // FP MMA
18734 // Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
18735 // the NumEltsN fields of the return value are ordered as A, B, C, D.
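// For example, __hmma_m16n16k16_mma_f32f16 (D = f32, C = f16) returns
// {8, 8, 4, 8, ...}: 8 elements for A and B, 4 for the f16 C fragment, and 8
// for the f32 D fragment, with the f32_f16 intrinsic name in D_C order.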
18736 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
18737 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
18738 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
18739 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
18740 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
18741 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
18742 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
18743 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
18744 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
18745 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
18746 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
18747 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
18748 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
18749 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
18750 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
18751 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
18752 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
18753 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
18754 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
18755 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
18756 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
18757 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
18758 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
18759 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
18761 // Integer MMA
18762 case NVPTX::BI__imma_m16n16k16_mma_s8:
18763 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
18764 case NVPTX::BI__imma_m16n16k16_mma_u8:
18765 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
18766 case NVPTX::BI__imma_m32n8k16_mma_s8:
18767 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
18768 case NVPTX::BI__imma_m32n8k16_mma_u8:
18769 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
18770 case NVPTX::BI__imma_m8n32k16_mma_s8:
18771 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
18772 case NVPTX::BI__imma_m8n32k16_mma_u8:
18773 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
18775 // Sub-integer MMA
18776 case NVPTX::BI__imma_m8n8k32_mma_s4:
18777 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
18778 case NVPTX::BI__imma_m8n8k32_mma_u4:
18779 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
18780 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
18781 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
18782 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
18783 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
18785 // Double MMA
18786 case NVPTX::BI__dmma_m8n8k4_mma_f64:
18787 return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
18789 // Alternate FP MMA
18790 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
18791 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
18792 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
18793 return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
18794 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
18795 return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
18796 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
18797 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
18798 default:
18799 llvm_unreachable("Unexpected builtin ID.");
18801 #undef MMA_VARIANTS
18802 #undef MMA_SATF_VARIANTS
18803 #undef MMA_VARIANTS_I4
18804 #undef MMA_VARIANTS_B1_AND
18805 #undef MMA_VARIANTS_B1_XOR
18808 static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
18809 const CallExpr *E) {
18810 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
18811 QualType ArgType = E->getArg(0)->getType();
18812 clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
18813 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
18814 return CGF.Builder.CreateCall(
18815 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
18816 {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
18819 static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
18820 const CallExpr *E) {
18821 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
18822 llvm::Type *ElemTy =
18823 CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
18824 return CGF.Builder.CreateCall(
18825 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
18826 {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
18829 static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
18830 CodeGenFunction &CGF, const CallExpr *E,
18831 int SrcSize) {
18832 return E->getNumArgs() == 3
18833 ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
18834 {CGF.EmitScalarExpr(E->getArg(0)),
18835 CGF.EmitScalarExpr(E->getArg(1)),
18836 CGF.EmitScalarExpr(E->getArg(2))})
18837 : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
18838 {CGF.EmitScalarExpr(E->getArg(0)),
18839 CGF.EmitScalarExpr(E->getArg(1))});
18842 static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
18843 const CallExpr *E, CodeGenFunction &CGF) {
18844 auto &C = CGF.CGM.getContext();
18845 if (!(C.getLangOpts().NativeHalfType ||
18846 !C.getTargetInfo().useFP16ConversionIntrinsics())) {
18847 CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() +
18848 " requires native half type support.");
18849 return nullptr;
18850 }
18852 if (IntrinsicID == Intrinsic::nvvm_ldg_global_f ||
18853 IntrinsicID == Intrinsic::nvvm_ldu_global_f)
18854 return MakeLdgLdu(IntrinsicID, CGF, E);
18856 SmallVector<Value *, 16> Args;
18857 auto *F = CGF.CGM.getIntrinsic(IntrinsicID);
18858 auto *FTy = F->getFunctionType();
18859 unsigned ICEArguments = 0;
18860 ASTContext::GetBuiltinTypeError Error;
18861 C.GetBuiltinType(BuiltinID, Error, &ICEArguments);
18862 assert(Error == ASTContext::GE_None && "Should not codegen an error");
18863 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
18864 assert((ICEArguments & (1 << i)) == 0);
18865 auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
18866 auto *PTy = FTy->getParamType(i);
18867 if (PTy != ArgValue->getType())
18868 ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
18869 Args.push_back(ArgValue);
18870 }
18872 return CGF.Builder.CreateCall(F, Args);
18873 }
18874 } // namespace
18876 Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
18877 const CallExpr *E) {
18878 switch (BuiltinID) {
18879 case NVPTX::BI__nvvm_atom_add_gen_i:
18880 case NVPTX::BI__nvvm_atom_add_gen_l:
18881 case NVPTX::BI__nvvm_atom_add_gen_ll:
18882 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
18884 case NVPTX::BI__nvvm_atom_sub_gen_i:
18885 case NVPTX::BI__nvvm_atom_sub_gen_l:
18886 case NVPTX::BI__nvvm_atom_sub_gen_ll:
18887 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
18889 case NVPTX::BI__nvvm_atom_and_gen_i:
18890 case NVPTX::BI__nvvm_atom_and_gen_l:
18891 case NVPTX::BI__nvvm_atom_and_gen_ll:
18892 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
18894 case NVPTX::BI__nvvm_atom_or_gen_i:
18895 case NVPTX::BI__nvvm_atom_or_gen_l:
18896 case NVPTX::BI__nvvm_atom_or_gen_ll:
18897 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
18899 case NVPTX::BI__nvvm_atom_xor_gen_i:
18900 case NVPTX::BI__nvvm_atom_xor_gen_l:
18901 case NVPTX::BI__nvvm_atom_xor_gen_ll:
18902 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
18904 case NVPTX::BI__nvvm_atom_xchg_gen_i:
18905 case NVPTX::BI__nvvm_atom_xchg_gen_l:
18906 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
18907 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
18909 case NVPTX::BI__nvvm_atom_max_gen_i:
18910 case NVPTX::BI__nvvm_atom_max_gen_l:
18911 case NVPTX::BI__nvvm_atom_max_gen_ll:
18912 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
18914 case NVPTX::BI__nvvm_atom_max_gen_ui:
18915 case NVPTX::BI__nvvm_atom_max_gen_ul:
18916 case NVPTX::BI__nvvm_atom_max_gen_ull:
18917 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
18919 case NVPTX::BI__nvvm_atom_min_gen_i:
18920 case NVPTX::BI__nvvm_atom_min_gen_l:
18921 case NVPTX::BI__nvvm_atom_min_gen_ll:
18922 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
18924 case NVPTX::BI__nvvm_atom_min_gen_ui:
18925 case NVPTX::BI__nvvm_atom_min_gen_ul:
18926 case NVPTX::BI__nvvm_atom_min_gen_ull:
18927 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
18929 case NVPTX::BI__nvvm_atom_cas_gen_i:
18930 case NVPTX::BI__nvvm_atom_cas_gen_l:
18931 case NVPTX::BI__nvvm_atom_cas_gen_ll:
18932 // __nvvm_atom_cas_gen_* should return the old value rather than the
18933 // success flag.
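// I.e. atomically: old = *p; if (old == cmp) *p = val; return old;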
18934 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
18936 case NVPTX::BI__nvvm_atom_add_gen_f:
18937 case NVPTX::BI__nvvm_atom_add_gen_d: {
18938 Value *Ptr = EmitScalarExpr(E->getArg(0));
18939 Value *Val = EmitScalarExpr(E->getArg(1));
18940 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
18941 AtomicOrdering::SequentiallyConsistent);
18944 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
18945 Value *Ptr = EmitScalarExpr(E->getArg(0));
18946 Value *Val = EmitScalarExpr(E->getArg(1));
18947 Function *FnALI32 =
18948 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
18949 return Builder.CreateCall(FnALI32, {Ptr, Val});
18952 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
18953 Value *Ptr = EmitScalarExpr(E->getArg(0));
18954 Value *Val = EmitScalarExpr(E->getArg(1));
18955 Function *FnALD32 =
18956 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
18957 return Builder.CreateCall(FnALD32, {Ptr, Val});
18960 case NVPTX::BI__nvvm_ldg_c:
18961 case NVPTX::BI__nvvm_ldg_sc:
18962 case NVPTX::BI__nvvm_ldg_c2:
18963 case NVPTX::BI__nvvm_ldg_sc2:
18964 case NVPTX::BI__nvvm_ldg_c4:
18965 case NVPTX::BI__nvvm_ldg_sc4:
18966 case NVPTX::BI__nvvm_ldg_s:
18967 case NVPTX::BI__nvvm_ldg_s2:
18968 case NVPTX::BI__nvvm_ldg_s4:
18969 case NVPTX::BI__nvvm_ldg_i:
18970 case NVPTX::BI__nvvm_ldg_i2:
18971 case NVPTX::BI__nvvm_ldg_i4:
18972 case NVPTX::BI__nvvm_ldg_l:
18973 case NVPTX::BI__nvvm_ldg_l2:
18974 case NVPTX::BI__nvvm_ldg_ll:
18975 case NVPTX::BI__nvvm_ldg_ll2:
18976 case NVPTX::BI__nvvm_ldg_uc:
18977 case NVPTX::BI__nvvm_ldg_uc2:
18978 case NVPTX::BI__nvvm_ldg_uc4:
18979 case NVPTX::BI__nvvm_ldg_us:
18980 case NVPTX::BI__nvvm_ldg_us2:
18981 case NVPTX::BI__nvvm_ldg_us4:
18982 case NVPTX::BI__nvvm_ldg_ui:
18983 case NVPTX::BI__nvvm_ldg_ui2:
18984 case NVPTX::BI__nvvm_ldg_ui4:
18985 case NVPTX::BI__nvvm_ldg_ul:
18986 case NVPTX::BI__nvvm_ldg_ul2:
18987 case NVPTX::BI__nvvm_ldg_ull:
18988 case NVPTX::BI__nvvm_ldg_ull2:
18989 // PTX Interoperability section 2.2: "For a vector with an even number of
18990 // elements, its alignment is set to number of elements times the alignment
18991 // of its member: n*alignof(t)."
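// E.g. for a 4-element int vector the rule gives 4 * alignof(int) = 16-byte
// alignment.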
18992 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_i, *this, E);
18993 case NVPTX::BI__nvvm_ldg_f:
18994 case NVPTX::BI__nvvm_ldg_f2:
18995 case NVPTX::BI__nvvm_ldg_f4:
18996 case NVPTX::BI__nvvm_ldg_d:
18997 case NVPTX::BI__nvvm_ldg_d2:
18998 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_f, *this, E);
19000 case NVPTX::BI__nvvm_ldu_c:
19001 case NVPTX::BI__nvvm_ldu_sc:
19002 case NVPTX::BI__nvvm_ldu_c2:
19003 case NVPTX::BI__nvvm_ldu_sc2:
19004 case NVPTX::BI__nvvm_ldu_c4:
19005 case NVPTX::BI__nvvm_ldu_sc4:
19006 case NVPTX::BI__nvvm_ldu_s:
19007 case NVPTX::BI__nvvm_ldu_s2:
19008 case NVPTX::BI__nvvm_ldu_s4:
19009 case NVPTX::BI__nvvm_ldu_i:
19010 case NVPTX::BI__nvvm_ldu_i2:
19011 case NVPTX::BI__nvvm_ldu_i4:
19012 case NVPTX::BI__nvvm_ldu_l:
19013 case NVPTX::BI__nvvm_ldu_l2:
19014 case NVPTX::BI__nvvm_ldu_ll:
19015 case NVPTX::BI__nvvm_ldu_ll2:
19016 case NVPTX::BI__nvvm_ldu_uc:
19017 case NVPTX::BI__nvvm_ldu_uc2:
19018 case NVPTX::BI__nvvm_ldu_uc4:
19019 case NVPTX::BI__nvvm_ldu_us:
19020 case NVPTX::BI__nvvm_ldu_us2:
19021 case NVPTX::BI__nvvm_ldu_us4:
19022 case NVPTX::BI__nvvm_ldu_ui:
19023 case NVPTX::BI__nvvm_ldu_ui2:
19024 case NVPTX::BI__nvvm_ldu_ui4:
19025 case NVPTX::BI__nvvm_ldu_ul:
19026 case NVPTX::BI__nvvm_ldu_ul2:
19027 case NVPTX::BI__nvvm_ldu_ull:
19028 case NVPTX::BI__nvvm_ldu_ull2:
19029 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
19030 case NVPTX::BI__nvvm_ldu_f:
19031 case NVPTX::BI__nvvm_ldu_f2:
19032 case NVPTX::BI__nvvm_ldu_f4:
19033 case NVPTX::BI__nvvm_ldu_d:
19034 case NVPTX::BI__nvvm_ldu_d2:
19035 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
19037 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
19038 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
19039 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
19040 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
19041 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
19042 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
19043 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
19044 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
19045 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
19046 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
19047 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
19048 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
19049 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
19050 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
19051 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
19052 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
19053 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
19054 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
19055 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
19056 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
19057 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
19058 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
19059 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
19060 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
19061 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
19062 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
19063 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
19064 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
19065 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
19066 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
19067 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
19068 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
19069 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
19070 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
19071 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
19072 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
19073 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
19074 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
19075 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
19076 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
19077 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
19078 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
19079 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
19080 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
19081 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
19082 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
19083 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
19084 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
19085 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
19086 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
19087 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
19088 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
19089 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
19090 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
19091 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
19092 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
19093 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
19094 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
19095 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
19096 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
19097 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
19098 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
19099 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
19100 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
19101 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
19102 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
19103 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
19104 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
19105 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
19106 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
19107 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
19108 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
19109 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
19110 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
19111 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
19112 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
19113 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
19114 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
19115 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
19116 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
19117 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
19118 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
19119 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
19120 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
19121 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
19122 Value *Ptr = EmitScalarExpr(E->getArg(0));
19123 llvm::Type *ElemTy =
19124 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
19125 return Builder.CreateCall(
19126 CGM.getIntrinsic(
19127 Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
19128 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
19130 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
19131 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
19132 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
19133 Value *Ptr = EmitScalarExpr(E->getArg(0));
19134 llvm::Type *ElemTy =
19135 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
19136 return Builder.CreateCall(
19137 CGM.getIntrinsic(
19138 Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
19139 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
19141 case NVPTX::BI__nvvm_match_all_sync_i32p:
19142 case NVPTX::BI__nvvm_match_all_sync_i64p: {
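// The intrinsic returns a {value, predicate} pair; store the (widened)
// predicate through the third pointer argument and return the value.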
19143 Value *Mask = EmitScalarExpr(E->getArg(0));
19144 Value *Val = EmitScalarExpr(E->getArg(1));
19145 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
19146 Value *ResultPair = Builder.CreateCall(
19147 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
19148 ? Intrinsic::nvvm_match_all_sync_i32p
19149 : Intrinsic::nvvm_match_all_sync_i64p),
19150 {Mask, Val});
19151 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
19152 PredOutPtr.getElementType());
19153 Builder.CreateStore(Pred, PredOutPtr);
19154 return Builder.CreateExtractValue(ResultPair, 0);
19157 // FP MMA loads
19158 case NVPTX::BI__hmma_m16n16k16_ld_a:
19159 case NVPTX::BI__hmma_m16n16k16_ld_b:
19160 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
19161 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
19162 case NVPTX::BI__hmma_m32n8k16_ld_a:
19163 case NVPTX::BI__hmma_m32n8k16_ld_b:
19164 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
19165 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
19166 case NVPTX::BI__hmma_m8n32k16_ld_a:
19167 case NVPTX::BI__hmma_m8n32k16_ld_b:
19168 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
19169 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
19170 // Integer MMA loads.
19171 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
19172 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
19173 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
19174 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
19175 case NVPTX::BI__imma_m16n16k16_ld_c:
19176 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
19177 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
19178 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
19179 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
19180 case NVPTX::BI__imma_m32n8k16_ld_c:
19181 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
19182 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
19183 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
19184 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
19185 case NVPTX::BI__imma_m8n32k16_ld_c:
19186 // Sub-integer MMA loads.
19187 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
19188 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
19189 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
19190 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
19191 case NVPTX::BI__imma_m8n8k32_ld_c:
19192 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
19193 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
19194 case NVPTX::BI__bmma_m8n8k128_ld_c:
19195 // Double MMA loads.
19196 case NVPTX::BI__dmma_m8n8k4_ld_a:
19197 case NVPTX::BI__dmma_m8n8k4_ld_b:
19198 case NVPTX::BI__dmma_m8n8k4_ld_c:
19199 // Alternate float MMA loads.
19200 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
19201 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
19202 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
19203 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
19204 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
19205 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
19206 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
19207 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
19208 case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
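// All MMA loads take (dst, src, ldm, is_col_major). The layout flag selects
// the row- or column-major intrinsic, and the returned fragment(s) are stored
// out to dst.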
19209 Address Dst = EmitPointerWithAlignment(E->getArg(0));
19210 Value *Src = EmitScalarExpr(E->getArg(1));
19211 Value *Ldm = EmitScalarExpr(E->getArg(2));
19212 std::optional<llvm::APSInt> isColMajorArg =
19213 E->getArg(3)->getIntegerConstantExpr(getContext());
19214 if (!isColMajorArg)
19215 return nullptr;
19216 bool isColMajor = isColMajorArg->getSExtValue();
19217 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
19218 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
19219 if (IID == 0)
19220 return nullptr;
19222 Value *Result =
19223 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
19225 // Save returned values.
19226 assert(II.NumResults);
19227 if (II.NumResults == 1) {
19228 Builder.CreateAlignedStore(Result, Dst.getPointer(),
19229 CharUnits::fromQuantity(4));
19230 } else {
19231 for (unsigned i = 0; i < II.NumResults; ++i) {
19232 Builder.CreateAlignedStore(
19233 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
19234 Dst.getElementType()),
19235 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
19236 llvm::ConstantInt::get(IntTy, i)),
19237 CharUnits::fromQuantity(4));
19240 return Result;
19243 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
19244 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
19245 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
19246 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
19247 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
19248 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
19249 case NVPTX::BI__imma_m16n16k16_st_c_i32:
19250 case NVPTX::BI__imma_m32n8k16_st_c_i32:
19251 case NVPTX::BI__imma_m8n32k16_st_c_i32:
19252 case NVPTX::BI__imma_m8n8k32_st_c_i32:
19253 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
19254 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
19255 case NVPTX::BI__mma_m16n16k8_st_c_f32: {
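// MMA stores mirror the loads: read NumResults fragments from src, bitcast
// each to the intrinsic's expected parameter type, and pass the leading
// dimension (ldm) as the final operand.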
19256 Value *Dst = EmitScalarExpr(E->getArg(0));
19257 Address Src = EmitPointerWithAlignment(E->getArg(1));
19258 Value *Ldm = EmitScalarExpr(E->getArg(2));
19259 std::optional<llvm::APSInt> isColMajorArg =
19260 E->getArg(3)->getIntegerConstantExpr(getContext());
19261 if (!isColMajorArg)
19262 return nullptr;
19263 bool isColMajor = isColMajorArg->getSExtValue();
19264 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
19265 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
19266 if (IID == 0)
19267 return nullptr;
19268 Function *Intrinsic =
19269 CGM.getIntrinsic(IID, Dst->getType());
19270 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
19271 SmallVector<Value *, 10> Values = {Dst};
19272 for (unsigned i = 0; i < II.NumResults; ++i) {
19273 Value *V = Builder.CreateAlignedLoad(
19274 Src.getElementType(),
19275 Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
19276 llvm::ConstantInt::get(IntTy, i)),
19277 CharUnits::fromQuantity(4));
19278 Values.push_back(Builder.CreateBitCast(V, ParamType));
19280 Values.push_back(Ldm);
19281 Value *Result = Builder.CreateCall(Intrinsic, Values);
19282 return Result;
19285 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
19286 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
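// With the variant tables above, layout 0..3 corresponds to the row/col
// combinations of A and B (0 = row,row ... 3 = col,col), and a non-zero satf
// selects the .satfinite form where one exists.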
19287 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
19288 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
19289 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
19290 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
19291 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
19292 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
19293 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
19294 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
19295 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
19296 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
19297 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
19298 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
19299 case NVPTX::BI__imma_m16n16k16_mma_s8:
19300 case NVPTX::BI__imma_m16n16k16_mma_u8:
19301 case NVPTX::BI__imma_m32n8k16_mma_s8:
19302 case NVPTX::BI__imma_m32n8k16_mma_u8:
19303 case NVPTX::BI__imma_m8n32k16_mma_s8:
19304 case NVPTX::BI__imma_m8n32k16_mma_u8:
19305 case NVPTX::BI__imma_m8n8k32_mma_s4:
19306 case NVPTX::BI__imma_m8n8k32_mma_u4:
19307 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
19308 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
19309 case NVPTX::BI__dmma_m8n8k4_mma_f64:
19310 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
19311 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
19312 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
19313 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
19314 Address Dst = EmitPointerWithAlignment(E->getArg(0));
19315 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
19316 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
19317 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
19318 std::optional<llvm::APSInt> LayoutArg =
19319 E->getArg(4)->getIntegerConstantExpr(getContext());
19320 if (!LayoutArg)
19321 return nullptr;
19322 int Layout = LayoutArg->getSExtValue();
19323 if (Layout < 0 || Layout > 3)
19324 return nullptr;
19325 llvm::APSInt SatfArg;
19326 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
19327 BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
19328 SatfArg = 0; // .b1 does not have a satf argument.
19329 else if (std::optional<llvm::APSInt> OptSatfArg =
19330 E->getArg(5)->getIntegerConstantExpr(getContext()))
19331 SatfArg = *OptSatfArg;
19332 else
19333 return nullptr;
19334 bool Satf = SatfArg.getSExtValue();
19335 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
19336 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
19337 if (IID == 0) // Unsupported combination of Layout/Satf.
19338 return nullptr;
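// The mma intrinsic takes NumEltsA fragments of A, then NumEltsB of B, then
// NumEltsC of C; the result is a struct of NumEltsD values stored back to Dst.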
19340 SmallVector<Value *, 24> Values;
19341 Function *Intrinsic = CGM.getIntrinsic(IID);
19342 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
19343 // Load A
19344 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
19345 Value *V = Builder.CreateAlignedLoad(
19346 SrcA.getElementType(),
19347 Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
19348 llvm::ConstantInt::get(IntTy, i)),
19349 CharUnits::fromQuantity(4));
19350 Values.push_back(Builder.CreateBitCast(V, AType));
19352 // Load B
19353 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
19354 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
19355 Value *V = Builder.CreateAlignedLoad(
19356 SrcB.getElementType(),
19357 Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
19358 llvm::ConstantInt::get(IntTy, i)),
19359 CharUnits::fromQuantity(4));
19360 Values.push_back(Builder.CreateBitCast(V, BType));
19362 // Load C
19363 llvm::Type *CType =
19364 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
19365 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
19366 Value *V = Builder.CreateAlignedLoad(
19367 SrcC.getElementType(),
19368 Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
19369 llvm::ConstantInt::get(IntTy, i)),
19370 CharUnits::fromQuantity(4));
19371 Values.push_back(Builder.CreateBitCast(V, CType));
19373 Value *Result = Builder.CreateCall(Intrinsic, Values);
19374 llvm::Type *DType = Dst.getElementType();
19375 for (unsigned i = 0; i < MI.NumEltsD; ++i)
19376 Builder.CreateAlignedStore(
19377 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
19378 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
19379 llvm::ConstantInt::get(IntTy, i)),
19380 CharUnits::fromQuantity(4));
19381 return Result;
19383 // The following builtins require half type support
19384 case NVPTX::BI__nvvm_ex2_approx_f16:
19385 return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, *this);
19386 case NVPTX::BI__nvvm_ex2_approx_f16x2:
19387 return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, *this);
19388 case NVPTX::BI__nvvm_ff2f16x2_rn:
19389 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
19390 case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
19391 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
19392 case NVPTX::BI__nvvm_ff2f16x2_rz:
19393 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
19394 case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
19395 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
19396 case NVPTX::BI__nvvm_fma_rn_f16:
19397 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
19398 case NVPTX::BI__nvvm_fma_rn_f16x2:
19399 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
19400 case NVPTX::BI__nvvm_fma_rn_ftz_f16:
19401 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
19402 case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
19403 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
19404 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
19405 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
19406 *this);
19407 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
19408 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
19409 *this);
19410 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
19411 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
19412 *this);
19413 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
19414 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
19415 *this);
19416 case NVPTX::BI__nvvm_fma_rn_relu_f16:
19417 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
19418 case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
19419 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
19420 case NVPTX::BI__nvvm_fma_rn_sat_f16:
19421 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
19422 case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
19423 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
19424 case NVPTX::BI__nvvm_fmax_f16:
19425 return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
19426 case NVPTX::BI__nvvm_fmax_f16x2:
19427 return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
19428 case NVPTX::BI__nvvm_fmax_ftz_f16:
19429 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
19430 case NVPTX::BI__nvvm_fmax_ftz_f16x2:
19431 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
19432 case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
19433 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
19434 case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
19435 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
19436 *this);
19437 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
19438 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
19439 E, *this);
19440 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
19441 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
19442 BuiltinID, E, *this);
19443 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
19444 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
19445 *this);
19446 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
19447 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
19448 E, *this);
19449 case NVPTX::BI__nvvm_fmax_nan_f16:
19450 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
19451 case NVPTX::BI__nvvm_fmax_nan_f16x2:
19452 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
19453 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
19454 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
19455 *this);
19456 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
19457 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
19458 E, *this);
19459 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
19460 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
19461 *this);
19462 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
19463 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
19464 *this);
19465 case NVPTX::BI__nvvm_fmin_f16:
19466 return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
19467 case NVPTX::BI__nvvm_fmin_f16x2:
19468 return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
19469 case NVPTX::BI__nvvm_fmin_ftz_f16:
19470 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
19471 case NVPTX::BI__nvvm_fmin_ftz_f16x2:
19472 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
19473 case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
19474 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
19475 case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
19476 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
19477 *this);
19478 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
19479 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
19480 E, *this);
19481 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
19482 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
19483 BuiltinID, E, *this);
19484 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
19485 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
19486 *this);
19487 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
19488 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
19489 E, *this);
19490 case NVPTX::BI__nvvm_fmin_nan_f16:
19491 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
19492 case NVPTX::BI__nvvm_fmin_nan_f16x2:
19493 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
19494 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
19495 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
19496 *this);
19497 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
19498 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
19499 E, *this);
19500 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
19501 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
19502 *this);
19503 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
19504 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
19505 *this);
19506 case NVPTX::BI__nvvm_ldg_h:
19507 return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
19508 case NVPTX::BI__nvvm_ldg_h2:
19509 return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
19510 case NVPTX::BI__nvvm_ldu_h:
19511 return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
19512 case NVPTX::BI__nvvm_ldu_h2: {
19513 return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
19514 }
19515 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
19516 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
19517 Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
19518 4);
19519 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
19520 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
19521 Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
19522 8);
19523 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
19524 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
19525 Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
19526 16);
19527 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
19528 return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
19529 Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
19530 16);
19531 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
19532 return Builder.CreateCall(
19533 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
19534 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
19535 return Builder.CreateCall(
19536 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
19537 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
19538 return Builder.CreateCall(
19539 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
19540 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
19541 return Builder.CreateCall(
19542 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
19543 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
19544 return Builder.CreateCall(
19545 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
19546 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
19547 return Builder.CreateCall(
19548 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
19549 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
19550 return Builder.CreateCall(
19551 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
19552 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
19553 return Builder.CreateCall(
19554 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
19555 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
19556 return Builder.CreateCall(
19557 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
19558 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
19559 return Builder.CreateCall(
19560 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
19561 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
19562 return Builder.CreateCall(
19563 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
19564 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
19565 return Builder.CreateCall(
19566 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
19567 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
19568 return Builder.CreateCall(
19569 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
19570 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
19571 return Builder.CreateCall(
19572 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
19573 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
19574 return Builder.CreateCall(
19575 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
19576 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
19577 return Builder.CreateCall(
19578 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
19579 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
19580 return Builder.CreateCall(
19581 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
19582 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
19583 return Builder.CreateCall(
19584 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
19585 case NVPTX::BI__nvvm_is_explicit_cluster:
19586 return Builder.CreateCall(
19587 CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
19588 case NVPTX::BI__nvvm_isspacep_shared_cluster:
19589 return Builder.CreateCall(
19590 CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
19591 EmitScalarExpr(E->getArg(0)));
19592 case NVPTX::BI__nvvm_mapa:
19593 return Builder.CreateCall(
19594 CGM.getIntrinsic(Intrinsic::nvvm_mapa),
19595 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
19596 case NVPTX::BI__nvvm_mapa_shared_cluster:
19597 return Builder.CreateCall(
19598 CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
19599 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
19600 case NVPTX::BI__nvvm_getctarank:
19601 return Builder.CreateCall(
19602 CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
19603 EmitScalarExpr(E->getArg(0)));
19604 case NVPTX::BI__nvvm_getctarank_shared_cluster:
19605 return Builder.CreateCall(
19606 CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
19607 EmitScalarExpr(E->getArg(0)));
19608 case NVPTX::BI__nvvm_barrier_cluster_arrive:
19609 return Builder.CreateCall(
19610 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
19611 case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
19612 return Builder.CreateCall(
19613 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
19614 case NVPTX::BI__nvvm_barrier_cluster_wait:
19615 return Builder.CreateCall(
19616 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
19617 case NVPTX::BI__nvvm_fence_sc_cluster:
19618 return Builder.CreateCall(
19619 CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
19620 default:
19621 return nullptr;
19622 }
19623 }
19625 namespace {
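// Gathers the common operands of the alignment builtins: the source value
// (array arguments decay to a pointer), the alignment zero-extended or
// truncated to the matching integer width, and the derived mask (alignment - 1).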
19626 struct BuiltinAlignArgs {
19627 llvm::Value *Src = nullptr;
19628 llvm::Type *SrcType = nullptr;
19629 llvm::Value *Alignment = nullptr;
19630 llvm::Value *Mask = nullptr;
19631 llvm::IntegerType *IntType = nullptr;
19633 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
19634 QualType AstType = E->getArg(0)->getType();
19635 if (AstType->isArrayType())
19636 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19637 else
19638 Src = CGF.EmitScalarExpr(E->getArg(0));
19639 SrcType = Src->getType();
19640 if (SrcType->isPointerTy()) {
19641 IntType = IntegerType::get(
19642 CGF.getLLVMContext(),
19643 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
19644 } else {
19645 assert(SrcType->isIntegerTy());
19646 IntType = cast<llvm::IntegerType>(SrcType);
19647 }
19648 Alignment = CGF.EmitScalarExpr(E->getArg(1));
19649 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
19650 auto *One = llvm::ConstantInt::get(IntType, 1);
19651 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
19652 }
19653 };
19654 } // namespace
19656 /// Generate (x & (y-1)) == 0.
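/// E.g. __builtin_is_aligned(p, 8) is emitted as ((uintptr_t)p & 7) == 0.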
19657 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
19658 BuiltinAlignArgs Args(E, *this);
19659 llvm::Value *SrcAddress = Args.Src;
19660 if (Args.SrcType->isPointerTy())
19661 SrcAddress =
19662 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
19663 return RValue::get(Builder.CreateICmpEQ(
19664 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
19665 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
19666 }
19668 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
19669 /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
19670 /// llvm.ptrmask intrinsic (preceded by a GEP in the align_up case).
19671 /// TODO: actually use ptrmask once most optimization passes know about it.
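/// E.g. __builtin_align_down(x, 8) computes x & ~7 (13 -> 8), and
/// __builtin_align_up(x, 8) computes (x + 7) & ~7 (13 -> 16, 16 -> 16).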
19672 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
19673 BuiltinAlignArgs Args(E, *this);
19674 llvm::Value *SrcAddr = Args.Src;
19675 if (Args.Src->getType()->isPointerTy())
19676 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
19677 llvm::Value *SrcForMask = SrcAddr;
19678 if (AlignUp) {
19679 // When aligning up we first add the mask so that any value that is not
19680 // already aligned reaches at least the next alignment boundary, and then
19681 // mask down to that multiple. Adding the mask (rather than the full
19682 // alignment) ensures that align_up on an already aligned value is a no-op.
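// E.g. with alignment 8 (mask 7): 13 -> over_boundary 20 -> aligned_result 16,
// while an already aligned 16 -> 23 -> 16.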
19683 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
19684 }
19685 // Invert the mask to only clear the lower bits.
19686 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
19687 llvm::Value *Result =
19688 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
19689 if (Args.Src->getType()->isPointerTy()) {
19690 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
19691 // Result = Builder.CreateIntrinsic(
19692 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
19693 // {SrcForMask, InvertedMask}, nullptr, "aligned_result");
19694 Result->setName("aligned_intptr");
19695 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
19696 // The result must point to the same underlying allocation. This means we
19697 // can use an inbounds GEP to enable better optimization.
19698 if (getLangOpts().isSignedOverflowDefined())
19699 Result =
19700 Builder.CreateGEP(Int8Ty, Args.Src, Difference, "aligned_result");
19701 else
19702 Result = EmitCheckedInBoundsGEP(Int8Ty, Args.Src, Difference,
19703 /*SignedIndices=*/true,
19704 /*isSubtraction=*/!AlignUp,
19705 E->getExprLoc(), "aligned_result");
19706 // Emit an alignment assumption to ensure that the new alignment is
19707 // propagated to loads/stores, etc.
19708 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
19709 }
19710 assert(Result->getType() == Args.SrcType);
19711 return RValue::get(Result);
19712 }
19714 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
19715 const CallExpr *E) {
19716 switch (BuiltinID) {
19717 case WebAssembly::BI__builtin_wasm_memory_size: {
19718 llvm::Type *ResultType = ConvertType(E->getType());
19719 Value *I = EmitScalarExpr(E->getArg(0));
19720 Function *Callee =
19721 CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
19722 return Builder.CreateCall(Callee, I);
19724 case WebAssembly::BI__builtin_wasm_memory_grow: {
19725 llvm::Type *ResultType = ConvertType(E->getType());
19726 Value *Args[] = {EmitScalarExpr(E->getArg(0)),
19727 EmitScalarExpr(E->getArg(1))};
19728 Function *Callee =
19729 CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
19730 return Builder.CreateCall(Callee, Args);
19732 case WebAssembly::BI__builtin_wasm_tls_size: {
19733 llvm::Type *ResultType = ConvertType(E->getType());
19734 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
19735 return Builder.CreateCall(Callee);
19737 case WebAssembly::BI__builtin_wasm_tls_align: {
19738 llvm::Type *ResultType = ConvertType(E->getType());
19739 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
19740 return Builder.CreateCall(Callee);
19742 case WebAssembly::BI__builtin_wasm_tls_base: {
19743 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
19744 return Builder.CreateCall(Callee);
19746 case WebAssembly::BI__builtin_wasm_throw: {
19747 Value *Tag = EmitScalarExpr(E->getArg(0));
19748 Value *Obj = EmitScalarExpr(E->getArg(1));
19749 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
19750 return Builder.CreateCall(Callee, {Tag, Obj});
19752 case WebAssembly::BI__builtin_wasm_rethrow: {
19753 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
19754 return Builder.CreateCall(Callee);
19756 case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
19757 Value *Addr = EmitScalarExpr(E->getArg(0));
19758 Value *Expected = EmitScalarExpr(E->getArg(1));
19759 Value *Timeout = EmitScalarExpr(E->getArg(2));
19760 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
19761 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
19763 case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
19764 Value *Addr = EmitScalarExpr(E->getArg(0));
19765 Value *Expected = EmitScalarExpr(E->getArg(1));
19766 Value *Timeout = EmitScalarExpr(E->getArg(2));
19767 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
19768 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
19770 case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
19771 Value *Addr = EmitScalarExpr(E->getArg(0));
19772 Value *Count = EmitScalarExpr(E->getArg(1));
19773 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
19774 return Builder.CreateCall(Callee, {Addr, Count});
19776 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
19777 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
19778 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
19779 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
19780 Value *Src = EmitScalarExpr(E->getArg(0));
19781 llvm::Type *ResT = ConvertType(E->getType());
19782 Function *Callee =
19783 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
19784 return Builder.CreateCall(Callee, {Src});
19786 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
19787 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
19788 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
19789 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
19790 Value *Src = EmitScalarExpr(E->getArg(0));
19791 llvm::Type *ResT = ConvertType(E->getType());
19792 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
19793 {ResT, Src->getType()});
19794 return Builder.CreateCall(Callee, {Src});
19796 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
19797 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
19798 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
19799 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
19800 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
19801 Value *Src = EmitScalarExpr(E->getArg(0));
19802 llvm::Type *ResT = ConvertType(E->getType());
19803 Function *Callee =
19804 CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
19805 return Builder.CreateCall(Callee, {Src});
19807 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
19808 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
19809 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
19810 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
19811 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
19812 Value *Src = EmitScalarExpr(E->getArg(0));
19813 llvm::Type *ResT = ConvertType(E->getType());
19814 Function *Callee =
19815 CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
19816 return Builder.CreateCall(Callee, {Src});
19818 case WebAssembly::BI__builtin_wasm_min_f32:
19819 case WebAssembly::BI__builtin_wasm_min_f64:
19820 case WebAssembly::BI__builtin_wasm_min_f32x4:
19821 case WebAssembly::BI__builtin_wasm_min_f64x2: {
19822 Value *LHS = EmitScalarExpr(E->getArg(0));
19823 Value *RHS = EmitScalarExpr(E->getArg(1));
19824 Function *Callee =
19825 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
19826 return Builder.CreateCall(Callee, {LHS, RHS});
19828 case WebAssembly::BI__builtin_wasm_max_f32:
19829 case WebAssembly::BI__builtin_wasm_max_f64:
19830 case WebAssembly::BI__builtin_wasm_max_f32x4:
19831 case WebAssembly::BI__builtin_wasm_max_f64x2: {
19832 Value *LHS = EmitScalarExpr(E->getArg(0));
19833 Value *RHS = EmitScalarExpr(E->getArg(1));
19834 Function *Callee =
19835 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
19836 return Builder.CreateCall(Callee, {LHS, RHS});
19838 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
19839 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
19840 Value *LHS = EmitScalarExpr(E->getArg(0));
19841 Value *RHS = EmitScalarExpr(E->getArg(1));
19842 Function *Callee =
19843 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
19844 return Builder.CreateCall(Callee, {LHS, RHS});
19846 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
19847 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
19848 Value *LHS = EmitScalarExpr(E->getArg(0));
19849 Value *RHS = EmitScalarExpr(E->getArg(1));
19850 Function *Callee =
19851 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
19852 return Builder.CreateCall(Callee, {LHS, RHS});
19854 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
19855 case WebAssembly::BI__builtin_wasm_floor_f32x4:
19856 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
19857 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
19858 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
19859 case WebAssembly::BI__builtin_wasm_floor_f64x2:
19860 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
19861 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
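// These lower to the generic rounding intrinsics; "nearest" uses
// llvm.nearbyint, which rounds to nearest-even in the default FP environment.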
19862 unsigned IntNo;
19863 switch (BuiltinID) {
19864 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
19865 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
19866 IntNo = Intrinsic::ceil;
19867 break;
19868 case WebAssembly::BI__builtin_wasm_floor_f32x4:
19869 case WebAssembly::BI__builtin_wasm_floor_f64x2:
19870 IntNo = Intrinsic::floor;
19871 break;
19872 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
19873 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
19874 IntNo = Intrinsic::trunc;
19875 break;
19876 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
19877 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
19878 IntNo = Intrinsic::nearbyint;
19879 break;
19880 default:
19881 llvm_unreachable("unexpected builtin ID");
19883 Value *Value = EmitScalarExpr(E->getArg(0));
19884 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
19885 return Builder.CreateCall(Callee, Value);
19887 case WebAssembly::BI__builtin_wasm_ref_null_extern: {
19888 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_extern);
19889 return Builder.CreateCall(Callee);
19891 case WebAssembly::BI__builtin_wasm_ref_null_func: {
19892 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
19893 return Builder.CreateCall(Callee);
19895 case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
19896 Value *Src = EmitScalarExpr(E->getArg(0));
19897 Value *Indices = EmitScalarExpr(E->getArg(1));
19898 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
19899 return Builder.CreateCall(Callee, {Src, Indices});
19901 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
19902 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
19903 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
19904 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
19905 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
19906 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
19907 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
19908 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
19909 unsigned IntNo;
19910 switch (BuiltinID) {
19911 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
19912 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
19913 IntNo = Intrinsic::sadd_sat;
19914 break;
19915 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
19916 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
19917 IntNo = Intrinsic::uadd_sat;
19918 break;
19919 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
19920 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
19921 IntNo = Intrinsic::wasm_sub_sat_signed;
19922 break;
19923 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
19924 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
19925 IntNo = Intrinsic::wasm_sub_sat_unsigned;
19926 break;
19927 default:
19928 llvm_unreachable("unexpected builtin ID");
19930 Value *LHS = EmitScalarExpr(E->getArg(0));
19931 Value *RHS = EmitScalarExpr(E->getArg(1));
19932 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
19933 return Builder.CreateCall(Callee, {LHS, RHS});
19935 case WebAssembly::BI__builtin_wasm_abs_i8x16:
19936 case WebAssembly::BI__builtin_wasm_abs_i16x8:
19937 case WebAssembly::BI__builtin_wasm_abs_i32x4:
19938 case WebAssembly::BI__builtin_wasm_abs_i64x2: {
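// Emit integer vector abs as select(v < 0, -v, v) over the lanes.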
19939 Value *Vec = EmitScalarExpr(E->getArg(0));
19940 Value *Neg = Builder.CreateNeg(Vec, "neg");
19941 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
19942 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
19943 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
19945 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
19946 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
19947 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
19948 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
19949 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
19950 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
19951 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
19952 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
19953 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
19954 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
19955 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
19956 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
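// Emit integer vector min/max as an icmp + select, with the predicate
// (slt/ult/sgt/ugt) chosen from the builtin's direction and signedness.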
19957 Value *LHS = EmitScalarExpr(E->getArg(0));
19958 Value *RHS = EmitScalarExpr(E->getArg(1));
19959 Value *ICmp;
19960 switch (BuiltinID) {
19961 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
19962 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
19963 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
19964 ICmp = Builder.CreateICmpSLT(LHS, RHS);
19965 break;
19966 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
19967 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
19968 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
19969 ICmp = Builder.CreateICmpULT(LHS, RHS);
19970 break;
19971 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
19972 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
19973 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
19974 ICmp = Builder.CreateICmpSGT(LHS, RHS);
19975 break;
19976 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
19977 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
19978 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
19979 ICmp = Builder.CreateICmpUGT(LHS, RHS);
19980 break;
19981 default:
19982 llvm_unreachable("unexpected builtin ID");
19984 return Builder.CreateSelect(ICmp, LHS, RHS);
19986 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
19987 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
19988 Value *LHS = EmitScalarExpr(E->getArg(0));
19989 Value *RHS = EmitScalarExpr(E->getArg(1));
19990 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
19991 ConvertType(E->getType()));
19992 return Builder.CreateCall(Callee, {LHS, RHS});
19994 case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
19995 Value *LHS = EmitScalarExpr(E->getArg(0));
19996 Value *RHS = EmitScalarExpr(E->getArg(1));
19997 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
19998 return Builder.CreateCall(Callee, {LHS, RHS});
20000 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
20001 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
20002 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
20003 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
20004 Value *Vec = EmitScalarExpr(E->getArg(0));
20005 unsigned IntNo;
20006 switch (BuiltinID) {
20007 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
20008 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
20009 IntNo = Intrinsic::wasm_extadd_pairwise_signed;
20010 break;
20011 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
20012 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
20013 IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
20014 break;
20015 default:
20016 llvm_unreachable("unexpected builtin ID");
20019 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
20020 return Builder.CreateCall(Callee, Vec);
20022 case WebAssembly::BI__builtin_wasm_bitselect: {
20023 Value *V1 = EmitScalarExpr(E->getArg(0));
20024 Value *V2 = EmitScalarExpr(E->getArg(1));
20025 Value *C = EmitScalarExpr(E->getArg(2));
20026 Function *Callee =
20027 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
20028 return Builder.CreateCall(Callee, {V1, V2, C});
20030 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
20031 Value *LHS = EmitScalarExpr(E->getArg(0));
20032 Value *RHS = EmitScalarExpr(E->getArg(1));
20033 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
20034 return Builder.CreateCall(Callee, {LHS, RHS});
20036 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
20037 Value *Vec = EmitScalarExpr(E->getArg(0));
20038 Function *Callee =
20039 CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
20040 return Builder.CreateCall(Callee, {Vec});
20042 case WebAssembly::BI__builtin_wasm_any_true_v128:
20043 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
20044 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
20045 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
20046 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
20047 unsigned IntNo;
20048 switch (BuiltinID) {
20049 case WebAssembly::BI__builtin_wasm_any_true_v128:
20050 IntNo = Intrinsic::wasm_anytrue;
20051 break;
20052 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
20053 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
20054 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
20055 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
20056 IntNo = Intrinsic::wasm_alltrue;
20057 break;
20058 default:
20059 llvm_unreachable("unexpected builtin ID");
20061 Value *Vec = EmitScalarExpr(E->getArg(0));
20062 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
20063 return Builder.CreateCall(Callee, {Vec});
20065 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
20066 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
20067 case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
20068 case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
20069 Value *Vec = EmitScalarExpr(E->getArg(0));
20070 Function *Callee =
20071 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
20072 return Builder.CreateCall(Callee, {Vec});
20074 case WebAssembly::BI__builtin_wasm_abs_f32x4:
20075 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
20076 Value *Vec = EmitScalarExpr(E->getArg(0));
20077 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
20078 return Builder.CreateCall(Callee, {Vec});
20080 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
20081 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
20082 Value *Vec = EmitScalarExpr(E->getArg(0));
20083 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
20084 return Builder.CreateCall(Callee, {Vec});
20086 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
20087 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
20088 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
20089 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
20090 Value *Low = EmitScalarExpr(E->getArg(0));
20091 Value *High = EmitScalarExpr(E->getArg(1));
20092 unsigned IntNo;
20093 switch (BuiltinID) {
20094 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
20095 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
20096 IntNo = Intrinsic::wasm_narrow_signed;
20097 break;
20098 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
20099 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
20100 IntNo = Intrinsic::wasm_narrow_unsigned;
20101 break;
20102 default:
20103 llvm_unreachable("unexpected builtin ID");
20104 }
20105 Function *Callee =
20106 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
20107 return Builder.CreateCall(Callee, {Low, High});
20108 }
20109 case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
20110 case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: {
20111 Value *Vec = EmitScalarExpr(E->getArg(0));
20112 unsigned IntNo;
20113 switch (BuiltinID) {
20114 case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
20115 IntNo = Intrinsic::fptosi_sat;
20116 break;
20117 case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4:
20118 IntNo = Intrinsic::fptoui_sat;
20119 break;
20120 default:
20121 llvm_unreachable("unexpected builtin ID");
20122 }
20123 llvm::Type *SrcT = Vec->getType();
20124 llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
20125 Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
20126 Value *Trunc = Builder.CreateCall(Callee, Vec);
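// The saturating conversion produces a <2 x i32> result; shuffling it with a
// zero vector using lane indices {0, 1, 2, 3} widens it to the <4 x i32> the
// builtin returns, with the upper two lanes zeroed.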
20127 Value *Splat = Constant::getNullValue(TruncT);
20128 return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
20129 }
20130 case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
20131 Value *Ops[18];
20132 size_t OpIdx = 0;
20133 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
20134 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
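// The remaining 16 operands are the lane indices; they must be integer
// constant expressions, so each one is folded to a ConstantInt here.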
20135 while (OpIdx < 18) {
20136 std::optional<llvm::APSInt> LaneConst =
20137 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
20138 assert(LaneConst && "Constant arg isn't actually constant?");
20139 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
20140 }
20141 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
20142 return Builder.CreateCall(Callee, Ops);
20143 }
20144 case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
20145 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
20146 case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
20147 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: {
20148 Value *A = EmitScalarExpr(E->getArg(0));
20149 Value *B = EmitScalarExpr(E->getArg(1));
20150 Value *C = EmitScalarExpr(E->getArg(2));
20151 unsigned IntNo;
20152 switch (BuiltinID) {
20153 case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
20154 case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
20155 IntNo = Intrinsic::wasm_relaxed_madd;
20156 break;
20157 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
20158 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2:
20159 IntNo = Intrinsic::wasm_relaxed_nmadd;
20160 break;
20161 default:
20162 llvm_unreachable("unexpected builtin ID");
20163 }
20164 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
20165 return Builder.CreateCall(Callee, {A, B, C});
20166 }
20167 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i8x16:
20168 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i16x8:
20169 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i32x4:
20170 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i64x2: {
20171 Value *A = EmitScalarExpr(E->getArg(0));
20172 Value *B = EmitScalarExpr(E->getArg(1));
20173 Value *C = EmitScalarExpr(E->getArg(2));
20174 Function *Callee =
20175 CGM.getIntrinsic(Intrinsic::wasm_relaxed_laneselect, A->getType());
20176 return Builder.CreateCall(Callee, {A, B, C});
20177 }
20178 case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
20179 Value *Src = EmitScalarExpr(E->getArg(0));
20180 Value *Indices = EmitScalarExpr(E->getArg(1));
20181 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
20182 return Builder.CreateCall(Callee, {Src, Indices});
20183 }
20184 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
20185 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
20186 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
20187 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
20188 Value *LHS = EmitScalarExpr(E->getArg(0));
20189 Value *RHS = EmitScalarExpr(E->getArg(1));
20190 unsigned IntNo;
20191 switch (BuiltinID) {
20192 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
20193 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
20194 IntNo = Intrinsic::wasm_relaxed_min;
20195 break;
20196 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
20197 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
20198 IntNo = Intrinsic::wasm_relaxed_max;
20199 break;
20200 default:
20201 llvm_unreachable("unexpected builtin ID");
20202 }
20203 Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
20204 return Builder.CreateCall(Callee, {LHS, RHS});
20205 }
20206 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
20207 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
20208 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
20209 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: {
20210 Value *Vec = EmitScalarExpr(E->getArg(0));
20211 unsigned IntNo;
20212 switch (BuiltinID) {
20213 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
20214 IntNo = Intrinsic::wasm_relaxed_trunc_signed;
20215 break;
20216 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
20217 IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
20218 break;
20219 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
20220 IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero;
20221 break;
20222 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2:
20223 IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero;
20224 break;
20225 default:
20226 llvm_unreachable("unexpected builtin ID");
20227 }
20228 Function *Callee = CGM.getIntrinsic(IntNo);
20229 return Builder.CreateCall(Callee, {Vec});
20230 }
20231 case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
20232 Value *LHS = EmitScalarExpr(E->getArg(0));
20233 Value *RHS = EmitScalarExpr(E->getArg(1));
20234 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed);
20235 return Builder.CreateCall(Callee, {LHS, RHS});
20236 }
20237 case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8: {
20238 Value *LHS = EmitScalarExpr(E->getArg(0));
20239 Value *RHS = EmitScalarExpr(E->getArg(1));
20240 Function *Callee =
20241 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed);
20242 return Builder.CreateCall(Callee, {LHS, RHS});
20243 }
20244 case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4: {
20245 Value *LHS = EmitScalarExpr(E->getArg(0));
20246 Value *RHS = EmitScalarExpr(E->getArg(1));
20247 Value *Acc = EmitScalarExpr(E->getArg(2));
20248 Function *Callee =
20249 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed);
20250 return Builder.CreateCall(Callee, {LHS, RHS, Acc});
20251 }
20252 case WebAssembly::BI__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4: {
20253 Value *LHS = EmitScalarExpr(E->getArg(0));
20254 Value *RHS = EmitScalarExpr(E->getArg(1));
20255 Value *Acc = EmitScalarExpr(E->getArg(2));
20256 Function *Callee =
20257 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
20258 return Builder.CreateCall(Callee, {LHS, RHS, Acc});
20259 }
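// The table builtins below model a Wasm table as a C array of reference-typed
// elements; the decayed pointer to that array is what the llvm.wasm.table.*
// intrinsics take as their table operand.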
20260 case WebAssembly::BI__builtin_wasm_table_get: {
20261 assert(E->getArg(0)->getType()->isArrayType());
20262 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20263 Value *Index = EmitScalarExpr(E->getArg(1));
20264 Function *Callee;
20265 if (E->getType().isWebAssemblyExternrefType())
20266 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_externref);
20267 else if (E->getType().isWebAssemblyFuncrefType())
20268 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_funcref);
20269 else
20270 llvm_unreachable(
20271 "Unexpected reference type for __builtin_wasm_table_get");
20272 return Builder.CreateCall(Callee, {Table, Index});
20273 }
20274 case WebAssembly::BI__builtin_wasm_table_set: {
20275 assert(E->getArg(0)->getType()->isArrayType());
20276 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20277 Value *Index = EmitScalarExpr(E->getArg(1));
20278 Value *Val = EmitScalarExpr(E->getArg(2));
20279 Function *Callee;
20280 if (E->getArg(2)->getType().isWebAssemblyExternrefType())
20281 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_externref);
20282 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
20283 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_funcref);
20284 else
20285 llvm_unreachable(
20286 "Unexpected reference type for __builtin_wasm_table_set");
20287 return Builder.CreateCall(Callee, {Table, Index, Val});
20288 }
20289 case WebAssembly::BI__builtin_wasm_table_size: {
20290 assert(E->getArg(0)->getType()->isArrayType());
20291 Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20292 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
20293 return Builder.CreateCall(Callee, Value);
20294 }
20295 case WebAssembly::BI__builtin_wasm_table_grow: {
20296 assert(E->getArg(0)->getType()->isArrayType());
20297 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20298 Value *Val = EmitScalarExpr(E->getArg(1));
20299 Value *NElems = EmitScalarExpr(E->getArg(2));
20301 Function *Callee;
20302 if (E->getArg(1)->getType().isWebAssemblyExternrefType())
20303 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_grow_externref);
20304 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
20305 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
20306 else
20307 llvm_unreachable(
20308 "Unexpected reference type for __builtin_wasm_table_grow");
20310 return Builder.CreateCall(Callee, {Table, Val, NElems});
20311 }
20312 case WebAssembly::BI__builtin_wasm_table_fill: {
20313 assert(E->getArg(0)->getType()->isArrayType());
20314 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20315 Value *Index = EmitScalarExpr(E->getArg(1));
20316 Value *Val = EmitScalarExpr(E->getArg(2));
20317 Value *NElems = EmitScalarExpr(E->getArg(3));
20319 Function *Callee;
20320 if (E->getArg(2)->getType().isWebAssemblyExternrefType())
20321 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_externref);
20322 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
20323 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
20324 else
20325 llvm_unreachable(
20326 "Unexpected reference type for __builtin_wasm_table_fill");
20328 return Builder.CreateCall(Callee, {Table, Index, Val, NElems});
20329 }
20330 case WebAssembly::BI__builtin_wasm_table_copy: {
20331 assert(E->getArg(0)->getType()->isArrayType());
20332 Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
20333 Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
20334 Value *DstIdx = EmitScalarExpr(E->getArg(2));
20335 Value *SrcIdx = EmitScalarExpr(E->getArg(3));
20336 Value *NElems = EmitScalarExpr(E->getArg(4));
20338 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_copy);
20340 return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});
20341 }
20342 default:
20343 return nullptr;
20344 }
20345 }
20347 static std::pair<Intrinsic::ID, unsigned>
20348 getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {
20349 struct Info {
20350 unsigned BuiltinID;
20351 Intrinsic::ID IntrinsicID;
20352 unsigned VecLen;
20353 };
20354 static Info Infos[] = {
20355 #define CUSTOM_BUILTIN_MAPPING(x,s) \
20356 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
20357 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
20358 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
20359 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
20360 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
20361 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
20362 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
20363 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
20364 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
20365 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
20366 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
20367 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
20368 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
20369 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
20370 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
20371 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
20372 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
20373 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
20374 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
20375 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
20376 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
20377 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
20378 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
20379 // Legacy builtins that take a vector in place of a vector predicate.
20380 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
20381 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
20382 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
20383 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
20384 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
20385 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
20386 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
20387 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
20388 #include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
20389 #undef CUSTOM_BUILTIN_MAPPING
20390 };
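// Infos must be sorted by BuiltinID for the lower_bound lookup below; the
// comma-operator initializer of the local static sorts it exactly once, on
// first use.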
20392 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
20393 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
20394 (void)SortOnce;
20396 const Info *F = llvm::lower_bound(Infos, Info{BuiltinID, 0, 0}, CmpInfo);
20397 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
20398 return {Intrinsic::not_intrinsic, 0};
20400 return {F->IntrinsicID, F->VecLen};
20401 }
20403 Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
20404 const CallExpr *E) {
20405 Intrinsic::ID ID;
20406 unsigned VecLen;
20407 std::tie(ID, VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID);
20409 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
20410 // The base pointer is passed by address, so it needs to be loaded.
20411 Address A = EmitPointerWithAlignment(E->getArg(0));
20412 Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
20413 llvm::Value *Base = Builder.CreateLoad(BP);
20414 // The treatment of both loads and stores is the same: the arguments for
20415 // the builtin are the same as the arguments for the intrinsic.
20416 // Load:
20417 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
20418 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
20419 // Store:
20420 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
20421 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
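// In both cases the intrinsic also produces the post-increment base pointer,
// which is written back through the first (by-address) builtin argument below.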
20422 SmallVector<llvm::Value*,5> Ops = { Base };
20423 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
20424 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20426 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
20427 // The load intrinsics generate two results (Value, NewBase), while stores
20428 // generate only one (NewBase). The new base address needs to be stored.
20429 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
20430 : Result;
20431 llvm::Value *LV = EmitScalarExpr(E->getArg(0));
20432 Address Dest = EmitPointerWithAlignment(E->getArg(0));
20433 llvm::Value *RetVal =
20434 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
20435 if (IsLoad)
20436 RetVal = Builder.CreateExtractValue(Result, 0);
20437 return RetVal;
20438 };
20440 // Handle the conversion of bit-reverse load intrinsics to bit code.
20441 // The intrinsic call after this function only reads from memory, and the
20442 // write to memory is handled by the store instruction.
20443 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
20444 // The intrinsic generates one result, which is the new value for the base
20445 // pointer. It needs to be returned. The result of the load instruction is
20446 // passed to the intrinsic by address, so the value needs to be stored.
20447 llvm::Value *BaseAddress = EmitScalarExpr(E->getArg(0));
20449 // Expressions like &(*pt++) are incremented on each evaluation.
20450 // EmitPointerWithAlignment and EmitScalarExpr evaluate the expression
20451 // once per call.
20452 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
20453 DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
20454 llvm::Value *DestAddress = DestAddr.getPointer();
20456 // Operands are Base, Dest, Modifier.
20457 // The intrinsic format in LLVM IR is defined as
20458 // { ValueType, i8* } (i8*, i32).
20459 llvm::Value *Result = Builder.CreateCall(
20460 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
20462 // The value needs to be stored as the variable is passed by reference.
20463 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
20465 // The stored value needs to be truncated to fit the destination type.
20466 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
20467 // to be handled with stores of the respective destination type.
20468 DestVal = Builder.CreateTrunc(DestVal, DestTy);
20470 Builder.CreateAlignedStore(DestVal, DestAddress, DestAddr.getAlignment());
20471 // The updated value of the base pointer is returned.
20472 return Builder.CreateExtractValue(Result, 1);
20473 };
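// V2Q and Q2V below convert between the plain vector type used by the C
// builtins and the HVX predicate type used by the intrinsics, via
// vandvrt/vandqrt with an all-ones scalar.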
20475 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
20476 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
20477 : Intrinsic::hexagon_V6_vandvrt;
20478 return Builder.CreateCall(CGM.getIntrinsic(ID),
20479 {Vec, Builder.getInt32(-1)});
20480 };
20481 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
20482 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
20483 : Intrinsic::hexagon_V6_vandqrt;
20484 return Builder.CreateCall(CGM.getIntrinsic(ID),
20485 {Pred, Builder.getInt32(-1)});
20486 };
20488 switch (BuiltinID) {
20489 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
20490 // and the corresponding C/C++ builtins use loads/stores to update
20491 // the predicate.
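// Illustrative shape of the lowering (pseudo-IR), assuming a pointer `qp` is
// passed as the third builtin argument:
//   %r = call {vec, pred} @llvm.hexagon.V6.vaddcarry(%a, %b, V2Q(load %qp))
//   store Q2V(extractvalue %r, 1), %qp
//   result = extractvalue %r, 0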
20492 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
20493 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
20494 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
20495 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
20496 // Get the type from the 0-th argument.
20497 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
20498 Address PredAddr =
20499 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
20500 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
20501 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
20502 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
20504 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
20505 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
20506 PredAddr.getAlignment());
20507 return Builder.CreateExtractValue(Result, 0);
20508 }
20509 // These are identical to the builtins above, except they don't consume an
20510 // input carry and only generate a carry-out. Since they still produce two
20511 // outputs, generate the store of the predicate, but no load.
20512 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo:
20513 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo_128B:
20514 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo:
20515 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: {
20516 // Get the type from the 0-th argument.
20517 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
20518 Address PredAddr =
20519 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
20520 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
20521 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
20523 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
20524 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
20525 PredAddr.getAlignment());
20526 return Builder.CreateExtractValue(Result, 0);
20527 }
20529 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
20530 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
20531 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
20532 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
20533 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
20534 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
20535 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
20536 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
20537 SmallVector<llvm::Value*,4> Ops;
20538 const Expr *PredOp = E->getArg(0);
20539 // There will be an implicit cast to a boolean vector. Strip it.
20540 if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
20541 if (Cast->getCastKind() == CK_BitCast)
20542 PredOp = Cast->getSubExpr();
20543 Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
20544 }
20545 for (int i = 1, e = E->getNumArgs(); i != e; ++i)
20546 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20547 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
20548 }
20550 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
20551 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
20552 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
20553 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
20554 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
20555 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
20556 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
20557 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
20558 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
20559 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
20560 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
20561 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
20562 return MakeCircOp(ID, /*IsLoad=*/true);
20563 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
20564 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
20565 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
20566 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
20567 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
20568 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
20569 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
20570 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
20571 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
20572 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
20573 return MakeCircOp(ID, /*IsLoad=*/false);
20574 case Hexagon::BI__builtin_brev_ldub:
20575 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
20576 case Hexagon::BI__builtin_brev_ldb:
20577 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
20578 case Hexagon::BI__builtin_brev_lduh:
20579 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
20580 case Hexagon::BI__builtin_brev_ldh:
20581 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
20582 case Hexagon::BI__builtin_brev_ldw:
20583 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
20584 case Hexagon::BI__builtin_brev_ldd:
20585 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
20586 } // switch
20588 return nullptr;
20589 }
20591 Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
20592 const CallExpr *E,
20593 ReturnValueSlot ReturnValue) {
20594 SmallVector<Value *, 4> Ops;
20595 llvm::Type *ResultType = ConvertType(E->getType());
20597 // Find out if any arguments are required to be integer constant expressions.
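// ICEArguments is a bitmask: if bit i is set, argument i must be an integer
// constant expression and is folded to a ConstantInt below rather than being
// emitted as a normal scalar expression.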
20598 unsigned ICEArguments = 0;
20599 ASTContext::GetBuiltinTypeError Error;
20600 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
20601 if (Error == ASTContext::GE_Missing_type) {
20602 // Vector intrinsics don't have a type string.
20603 assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
20604 BuiltinID <= clang::RISCV::LastRVVBuiltin);
20605 ICEArguments = 0;
20606 if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
20607 BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
20608 ICEArguments = 1 << 1;
20609 } else {
20610 assert(Error == ASTContext::GE_None && "Unexpected error");
20611 }
20613 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
20614 ICEArguments |= (1 << 1);
20615 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
20616 ICEArguments |= (1 << 2);
20618 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
20620 // Handle aggregate arguments, namely RVV tuple types in segment loads/stores.
20620 if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
20621 LValue L = EmitAggExprToLValue(E->getArg(i));
20622 llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
20623 Ops.push_back(AggValue);
20624 continue;
20625 }
20627 // If this is a normal argument, just emit it as a scalar.
20628 if ((ICEArguments & (1 << i)) == 0) {
20629 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20630 continue;
20631 }
20633 // If this is required to be a constant, constant fold it so that we know
20634 // that the generated intrinsic gets a ConstantInt.
20635 Ops.push_back(llvm::ConstantInt::get(
20636 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
20637 }
20639 Intrinsic::ID ID = Intrinsic::not_intrinsic;
20640 unsigned NF = 1;
20641 // The 0th bit simulates the `vta` of RVV
20642 // The 1st bit simulates the `vma` of RVV
20643 constexpr unsigned RVV_VTA = 0x1;
20644 constexpr unsigned RVV_VMA = 0x2;
20645 int PolicyAttrs = 0;
20646 bool IsMasked = false;
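// NF, PolicyAttrs and IsMasked are consumed by the generated vector-builtin
// cases included from riscv_vector_builtin_cg.inc further down.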
20648 // Required for overloaded intrinsics.
20649 llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
20650 switch (BuiltinID) {
20651 default: llvm_unreachable("unexpected builtin ID");
20652 case RISCV::BI__builtin_riscv_orc_b_32:
20653 case RISCV::BI__builtin_riscv_orc_b_64:
20654 case RISCV::BI__builtin_riscv_clz_32:
20655 case RISCV::BI__builtin_riscv_clz_64:
20656 case RISCV::BI__builtin_riscv_ctz_32:
20657 case RISCV::BI__builtin_riscv_ctz_64:
20658 case RISCV::BI__builtin_riscv_clmul_32:
20659 case RISCV::BI__builtin_riscv_clmul_64:
20660 case RISCV::BI__builtin_riscv_clmulh_32:
20661 case RISCV::BI__builtin_riscv_clmulh_64:
20662 case RISCV::BI__builtin_riscv_clmulr_32:
20663 case RISCV::BI__builtin_riscv_clmulr_64:
20664 case RISCV::BI__builtin_riscv_xperm4_32:
20665 case RISCV::BI__builtin_riscv_xperm4_64:
20666 case RISCV::BI__builtin_riscv_xperm8_32:
20667 case RISCV::BI__builtin_riscv_xperm8_64:
20668 case RISCV::BI__builtin_riscv_brev8_32:
20669 case RISCV::BI__builtin_riscv_brev8_64:
20670 case RISCV::BI__builtin_riscv_zip_32:
20671 case RISCV::BI__builtin_riscv_unzip_32: {
20672 switch (BuiltinID) {
20673 default: llvm_unreachable("unexpected builtin ID");
20674 // Zbb
20675 case RISCV::BI__builtin_riscv_orc_b_32:
20676 case RISCV::BI__builtin_riscv_orc_b_64:
20677 ID = Intrinsic::riscv_orc_b;
20678 break;
20679 case RISCV::BI__builtin_riscv_clz_32:
20680 case RISCV::BI__builtin_riscv_clz_64: {
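// No dedicated RISC-V intrinsic is needed here: the builtin maps onto the
// generic ctlz intrinsic with a false second operand (a zero input is well
// defined), and the result is cast to the builtin's return type if necessary.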
20681 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
20682 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
20683 if (Result->getType() != ResultType)
20684 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
20685 "cast");
20686 return Result;
20687 }
20688 case RISCV::BI__builtin_riscv_ctz_32:
20689 case RISCV::BI__builtin_riscv_ctz_64: {
20690 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
20691 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
20692 if (Result->getType() != ResultType)
20693 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
20694 "cast");
20695 return Result;
20696 }
20698 // Zbc
20699 case RISCV::BI__builtin_riscv_clmul_32:
20700 case RISCV::BI__builtin_riscv_clmul_64:
20701 ID = Intrinsic::riscv_clmul;
20702 break;
20703 case RISCV::BI__builtin_riscv_clmulh_32:
20704 case RISCV::BI__builtin_riscv_clmulh_64:
20705 ID = Intrinsic::riscv_clmulh;
20706 break;
20707 case RISCV::BI__builtin_riscv_clmulr_32:
20708 case RISCV::BI__builtin_riscv_clmulr_64:
20709 ID = Intrinsic::riscv_clmulr;
20710 break;
20712 // Zbkx
20713 case RISCV::BI__builtin_riscv_xperm8_32:
20714 case RISCV::BI__builtin_riscv_xperm8_64:
20715 ID = Intrinsic::riscv_xperm8;
20716 break;
20717 case RISCV::BI__builtin_riscv_xperm4_32:
20718 case RISCV::BI__builtin_riscv_xperm4_64:
20719 ID = Intrinsic::riscv_xperm4;
20720 break;
20722 // Zbkb
20723 case RISCV::BI__builtin_riscv_brev8_32:
20724 case RISCV::BI__builtin_riscv_brev8_64:
20725 ID = Intrinsic::riscv_brev8;
20726 break;
20727 case RISCV::BI__builtin_riscv_zip_32:
20728 ID = Intrinsic::riscv_zip;
20729 break;
20730 case RISCV::BI__builtin_riscv_unzip_32:
20731 ID = Intrinsic::riscv_unzip;
20732 break;
20733 }
20735 IntrinsicTypes = {ResultType};
20736 break;
20737 }
20739 // Zk builtins
20741 // Zknh
20742 case RISCV::BI__builtin_riscv_sha256sig0:
20743 ID = Intrinsic::riscv_sha256sig0;
20744 break;
20745 case RISCV::BI__builtin_riscv_sha256sig1:
20746 ID = Intrinsic::riscv_sha256sig1;
20747 break;
20748 case RISCV::BI__builtin_riscv_sha256sum0:
20749 ID = Intrinsic::riscv_sha256sum0;
20750 break;
20751 case RISCV::BI__builtin_riscv_sha256sum1:
20752 ID = Intrinsic::riscv_sha256sum1;
20753 break;
20755 // Zksed
20756 case RISCV::BI__builtin_riscv_sm4ks:
20757 ID = Intrinsic::riscv_sm4ks;
20758 break;
20759 case RISCV::BI__builtin_riscv_sm4ed:
20760 ID = Intrinsic::riscv_sm4ed;
20761 break;
20763 // Zksh
20764 case RISCV::BI__builtin_riscv_sm3p0:
20765 ID = Intrinsic::riscv_sm3p0;
20766 break;
20767 case RISCV::BI__builtin_riscv_sm3p1:
20768 ID = Intrinsic::riscv_sm3p1;
20769 break;
20771 // Zihintntl
20772 case RISCV::BI__builtin_riscv_ntl_load: {
20773 llvm::Type *ResTy = ConvertType(E->getType());
20774 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
20775 if (Ops.size() == 2)
20776 DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
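// The load is annotated with !nontemporal metadata plus a target-specific
// "riscv-nontemporal-domain" node carrying DomainVal so the backend can emit
// the corresponding non-temporal locality hint.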
20778 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
20779 getLLVMContext(),
20780 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
20781 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
20782 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
20784 int Width;
20785 if (ResTy->isScalableTy()) {
20786 const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
20787 llvm::Type *ScalarTy = ResTy->getScalarType();
20788 Width = ScalarTy->getPrimitiveSizeInBits() *
20789 SVTy->getElementCount().getKnownMinValue();
20790 } else
20791 Width = ResTy->getPrimitiveSizeInBits();
20792 LoadInst *Load = Builder.CreateLoad(
20793 Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
20795 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
20796 Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
20797 RISCVDomainNode);
20799 return Load;
20800 }
20801 case RISCV::BI__builtin_riscv_ntl_store: {
20802 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
20803 if (Ops.size() == 3)
20804 DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
20806 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
20807 getLLVMContext(),
20808 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
20809 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
20810 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
20812 StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
20813 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
20814 Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
20815 RISCVDomainNode);
20817 return Store;
20818 }
20820 // Vector builtins are handled from here.
20821 #include "clang/Basic/riscv_vector_builtin_cg.inc"
20822 // SiFive Vector builtins are handled from here.
20823 #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
20824 }
20826 assert(ID != Intrinsic::not_intrinsic);
20828 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
20829 return Builder.CreateCall(F, Ops, "");