//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/X86TargetParser.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

static llvm::cl::opt<bool> ClSanitizeAlignmentBuiltin(
    "sanitize-alignment-builtin", llvm::cl::Hidden,
    llvm::cl::desc("Instrument builtin functions for -fsanitize=alignment"),
    llvm::cl::init(true));
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
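
// Illustrative note (sketch, not from the original source): with
// -ftrivial-auto-var-init=zero, memory returned by __builtin_alloca is
// initialized by a memset of 0x00 over the whole allocation, and that memset
// carries "auto-init" annotation metadata so later passes can recognize it;
// the pattern mode does the same with the target's pattern byte instead.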
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to other targets other than
    // PPC, after backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
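
// Example (sketch, not part of the original file): __builtin_fabsf with no
// asm label has the 10-character "__builtin_" prefix stripped, yielding a
// declaration equivalent to
//   declare float @fabsf(float)
// while on PPC64 with IEEE 128-bit long double, __builtin_printf is redirected
// to __printfieee128 through the F128Builtins table above.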
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
static llvm::Value *CheckAtomicAlignment(CodeGenFunction &CGF,
                                         const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
                       : Ptr.getElementType()->getScalarSizeInBits() / 8;
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
  }
  return Ptr.getPointer();
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Args[2];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
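
// Example (sketch): a caller that passes llvm::AtomicRMWInst::Add for
//   int old = __sync_fetch_and_add(&x, 1);
// gets roughly
//   %old = atomicrmw add ptr %x, i32 1 seq_cst
// and EmitFromInt converts the old value back to the expression's type.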
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = DestPtr;

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Args[3];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
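
// Example (sketch): for
//   bool ok = __sync_bool_compare_and_swap(&x, expected, desired);
// the helper (ReturnBool = true) emits roughly
//   %pair = cmpxchg ptr %x, i32 %expected, i32 %desired seq_cst seq_cst
//   %ok   = extractvalue { i32, i1 } %pair, 1
// whereas the "val" flavor (ReturnBool = false) returns element 0, the old
// value, converted back through EmitFromInt.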
/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
static Value *EmitAtomicCmpXchgForMSIntrin(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
      Destination, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
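
// Example (sketch): _InterlockedCompareExchange(&x, Exchange, Comparand)
// passes (destination, new value, expected value), so the code above emits
//   cmpxchg volatile ptr %x, i32 %Comparand, i32 %Exchange <ordering> <ordering>
// and returns extractvalue 0, i.e. the previous contents of *Destination.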
// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));

  assert(Destination->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());
  assert(ComparandPtr->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address ComparandResult(ComparandPtr, Int128Ty,
                          CGF.getContext().toCharUnitsFromBits(128));

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandResult);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
static Value *EmitAtomicIncrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}
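
// Example (sketch): when the builder is not in FP-constrained mode, passing
// Intrinsic::sqrt produces
//   call double @llvm.sqrt.f64(double %x)
// while in a strict-FP region the constrained counterpart is emitted instead,
// e.g. @llvm.experimental.constrained.sqrt.f64 with rounding-mode and
// exception-behavior metadata operands supplied by CreateConstrainedFPCall.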
// Has second type mangled argument.
static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
    CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
    llvm::Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}
// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}
// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {
  Function *F;
  if (CGF.Builder.getIsFPConstrained())
    F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
  else
    F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);

  if (CGF.Builder.getIsFPConstrained())
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  else
    return CGF.Builder.CreateCall(F, Args);
}
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               unsigned IntrinsicID,
                               llvm::StringRef Name = "") {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0, Name);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}
// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}
static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               llvm::Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}
/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
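
// Example (sketch): with IntrinsicID = llvm.uadd.with.overflow on i32 operands
// this emits
//   %t = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// returning element 0 (the sum) and storing element 1 (the overflow bit) into
// the Carry out-parameter.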
static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  Call->setMetadata(llvm::LLVMContext::MD_noundef,
                    llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
  return Call;
}
struct WidthAndSignedness {
  unsigned Width;
  bool Signed;
};

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType()  ? 1
                   : Type->isBitIntType() ? context.getIntWidth(Type)
                                          : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
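
// Worked example (sketch): for {unsigned 32-bit, signed 16-bit} the result
// must be signed, and the unsigned member then needs one extra bit (32 + 1),
// so the encompassing type is a signed 33-bit integer.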
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  if (IsDynamic) {
    LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
        getLangOpts().getStrictFlexArraysLevel();
    const Expr *Base = E->IgnoreParenImpCasts();

    if (FieldDecl *FD = FindCountedByField(Base, StrictFlexArraysLevel)) {
      const auto *ME = dyn_cast<MemberExpr>(Base);
      llvm::Value *ObjectSize = nullptr;

      if (!ME) {
        const auto *DRE = dyn_cast<DeclRefExpr>(Base);
        ValueDecl *VD = nullptr;

        ObjectSize = ConstantInt::get(
            ResType,
            getContext().getTypeSize(DRE->getType()->getPointeeType()) / 8,
            true);

        if (auto *RD = DRE->getType()->getPointeeType()->getAsRecordDecl())
          VD = RD->getLastField();

        Expr *ICE = ImplicitCastExpr::Create(
            getContext(), DRE->getType(), CK_LValueToRValue,
            const_cast<Expr *>(cast<Expr>(DRE)), nullptr, VK_PRValue,
            FPOptionsOverride());
        ME = MemberExpr::CreateImplicit(getContext(), ICE, true, VD,
                                        VD->getType(), VK_LValue, OK_Ordinary);
      }

      // At this point, we know that \p ME is a flexible array member.
      const auto *ArrayTy = getContext().getAsArrayType(ME->getType());
      unsigned Size = getContext().getTypeSize(ArrayTy->getElementType());

      llvm::Value *CountField =
          EmitAnyExprToTemp(MemberExpr::CreateImplicit(
                                getContext(), const_cast<Expr *>(ME->getBase()),
                                ME->isArrow(), FD, FD->getType(), VK_LValue,
                                OK_Ordinary))
              .getScalarVal();

      llvm::Value *Mul = Builder.CreateMul(
          CountField, llvm::ConstantInt::get(CountField->getType(), Size / 8));
      Mul = Builder.CreateZExtOrTrunc(Mul, ResType);

      if (ObjectSize)
        return Builder.CreateAdd(ObjectSize, Mul);

      return Mul;
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
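
// Example (sketch): when the size cannot be computed statically,
// __builtin_object_size(p, 0) lowers to
//   call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)
// where the i1 flags are (min, null-is-unknown, dynamic); Type 2 sets
// min = true, and __builtin_dynamic_object_size sets dynamic = true.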
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}
static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
  std::string_view MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CGF.Int8Ty, CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
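
// Example (sketch) of the generic (non-x86) path for
//   unsigned char r = _bittestandset(p, n);
// load the byte at p[n >> 3], OR in (1 << (n & 7)), store it back, and return
// (oldbyte >> (n & 7)) & 1; the interlocked variants fold the load/modify/store
// into a single atomicrmw with the ordering selected above.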
static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
                                                unsigned BuiltinID,
                                                const CallExpr *E) {
  Value *Addr = CGF.EmitScalarExpr(E->getArg(0));

  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  llvm::IntegerType *RetType = CGF.Int32Ty;

  switch (BuiltinID) {
  case clang::PPC::BI__builtin_ppc_ldarx:
    AsmOS << "ldarx ";
    RetType = CGF.Int64Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lwarx:
    AsmOS << "lwarx ";
    RetType = CGF.Int32Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lharx:
    AsmOS << "lharx ";
    RetType = CGF.Int16Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lbarx:
    AsmOS << "lbarx ";
    RetType = CGF.Int8Ty;
    break;
  default:
    llvm_unreachable("Expected only PowerPC load reserve intrinsics");
  }

  AsmOS << "$0, ${1:y}";

  std::string Constraints = "=r,*Z,~{memory}";
  std::string_view MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *PtrType = CGF.UnqualPtrTy;
  llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
  CI->addParamAttr(
      0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
  return CI;
}
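
// Example (sketch): __builtin_ppc_lwarx(p) becomes an inline-asm call roughly
// equivalent to
//   call i32 asm sideeffect "lwarx $0, ${1:y}", "=r,*Z,~{memory}"(ptr elementtype(i32) %p)
// keeping the reservation's memory operand and clobbers visible to LLVM.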
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}
// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
// we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedCompareExchange128,
  _InterlockedCompareExchange128_acq,
  _InterlockedCompareExchange128_rel,
  _InterlockedCompareExchange128_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};
static std::optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return std::nullopt;
  case clang::ARM::BI_BitScanForward:
  case clang::ARM::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::ARM::BI_BitScanReverse:
  case clang::ARM::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::ARM::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::ARM::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::ARM::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::ARM::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::ARM::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::ARM::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::ARM::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::ARM::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case clang::ARM::BI_InterlockedExchangeAdd8_acq:
  case clang::ARM::BI_InterlockedExchangeAdd16_acq:
  case clang::ARM::BI_InterlockedExchangeAdd_acq:
  case clang::ARM::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case clang::ARM::BI_InterlockedExchangeAdd8_rel:
  case clang::ARM::BI_InterlockedExchangeAdd16_rel:
  case clang::ARM::BI_InterlockedExchangeAdd_rel:
  case clang::ARM::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case clang::ARM::BI_InterlockedExchangeAdd8_nf:
  case clang::ARM::BI_InterlockedExchangeAdd16_nf:
  case clang::ARM::BI_InterlockedExchangeAdd_nf:
  case clang::ARM::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case clang::ARM::BI_InterlockedExchange8_acq:
  case clang::ARM::BI_InterlockedExchange16_acq:
  case clang::ARM::BI_InterlockedExchange_acq:
  case clang::ARM::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case clang::ARM::BI_InterlockedExchange8_rel:
  case clang::ARM::BI_InterlockedExchange16_rel:
  case clang::ARM::BI_InterlockedExchange_rel:
  case clang::ARM::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case clang::ARM::BI_InterlockedExchange8_nf:
  case clang::ARM::BI_InterlockedExchange16_nf:
  case clang::ARM::BI_InterlockedExchange_nf:
  case clang::ARM::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case clang::ARM::BI_InterlockedCompareExchange8_acq:
  case clang::ARM::BI_InterlockedCompareExchange16_acq:
  case clang::ARM::BI_InterlockedCompareExchange_acq:
  case clang::ARM::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case clang::ARM::BI_InterlockedCompareExchange8_rel:
  case clang::ARM::BI_InterlockedCompareExchange16_rel:
  case clang::ARM::BI_InterlockedCompareExchange_rel:
  case clang::ARM::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case clang::ARM::BI_InterlockedCompareExchange8_nf:
  case clang::ARM::BI_InterlockedCompareExchange16_nf:
  case clang::ARM::BI_InterlockedCompareExchange_nf:
  case clang::ARM::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case clang::ARM::BI_InterlockedOr8_acq:
  case clang::ARM::BI_InterlockedOr16_acq:
  case clang::ARM::BI_InterlockedOr_acq:
  case clang::ARM::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case clang::ARM::BI_InterlockedOr8_rel:
  case clang::ARM::BI_InterlockedOr16_rel:
  case clang::ARM::BI_InterlockedOr_rel:
  case clang::ARM::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case clang::ARM::BI_InterlockedOr8_nf:
  case clang::ARM::BI_InterlockedOr16_nf:
  case clang::ARM::BI_InterlockedOr_nf:
  case clang::ARM::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case clang::ARM::BI_InterlockedXor8_acq:
  case clang::ARM::BI_InterlockedXor16_acq:
  case clang::ARM::BI_InterlockedXor_acq:
  case clang::ARM::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case clang::ARM::BI_InterlockedXor8_rel:
  case clang::ARM::BI_InterlockedXor16_rel:
  case clang::ARM::BI_InterlockedXor_rel:
  case clang::ARM::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case clang::ARM::BI_InterlockedXor8_nf:
  case clang::ARM::BI_InterlockedXor16_nf:
  case clang::ARM::BI_InterlockedXor_nf:
  case clang::ARM::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case clang::ARM::BI_InterlockedAnd8_acq:
  case clang::ARM::BI_InterlockedAnd16_acq:
  case clang::ARM::BI_InterlockedAnd_acq:
  case clang::ARM::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case clang::ARM::BI_InterlockedAnd8_rel:
  case clang::ARM::BI_InterlockedAnd16_rel:
  case clang::ARM::BI_InterlockedAnd_rel:
  case clang::ARM::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case clang::ARM::BI_InterlockedAnd8_nf:
  case clang::ARM::BI_InterlockedAnd16_nf:
  case clang::ARM::BI_InterlockedAnd_nf:
  case clang::ARM::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case clang::ARM::BI_InterlockedIncrement16_acq:
  case clang::ARM::BI_InterlockedIncrement_acq:
  case clang::ARM::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case clang::ARM::BI_InterlockedIncrement16_rel:
  case clang::ARM::BI_InterlockedIncrement_rel:
  case clang::ARM::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case clang::ARM::BI_InterlockedIncrement16_nf:
  case clang::ARM::BI_InterlockedIncrement_nf:
  case clang::ARM::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case clang::ARM::BI_InterlockedDecrement16_acq:
  case clang::ARM::BI_InterlockedDecrement_acq:
  case clang::ARM::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case clang::ARM::BI_InterlockedDecrement16_rel:
  case clang::ARM::BI_InterlockedDecrement_rel:
  case clang::ARM::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case clang::ARM::BI_InterlockedDecrement16_nf:
  case clang::ARM::BI_InterlockedDecrement_nf:
  case clang::ARM::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}
static std::optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return std::nullopt;
  case clang::AArch64::BI_BitScanForward:
  case clang::AArch64::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::AArch64::BI_BitScanReverse:
  case clang::AArch64::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::AArch64::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::AArch64::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::AArch64::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::AArch64::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::AArch64::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::AArch64::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::AArch64::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::AArch64::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case clang::AArch64::BI_InterlockedExchangeAdd8_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd16_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case clang::AArch64::BI_InterlockedExchangeAdd8_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd16_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case clang::AArch64::BI_InterlockedExchangeAdd8_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd16_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case clang::AArch64::BI_InterlockedExchange8_acq:
  case clang::AArch64::BI_InterlockedExchange16_acq:
  case clang::AArch64::BI_InterlockedExchange_acq:
  case clang::AArch64::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case clang::AArch64::BI_InterlockedExchange8_rel:
  case clang::AArch64::BI_InterlockedExchange16_rel:
  case clang::AArch64::BI_InterlockedExchange_rel:
  case clang::AArch64::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case clang::AArch64::BI_InterlockedExchange8_nf:
  case clang::AArch64::BI_InterlockedExchange16_nf:
  case clang::AArch64::BI_InterlockedExchange_nf:
  case clang::AArch64::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case clang::AArch64::BI_InterlockedCompareExchange8_acq:
  case clang::AArch64::BI_InterlockedCompareExchange16_acq:
  case clang::AArch64::BI_InterlockedCompareExchange_acq:
  case clang::AArch64::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case clang::AArch64::BI_InterlockedCompareExchange8_rel:
  case clang::AArch64::BI_InterlockedCompareExchange16_rel:
  case clang::AArch64::BI_InterlockedCompareExchange_rel:
  case clang::AArch64::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case clang::AArch64::BI_InterlockedCompareExchange8_nf:
  case clang::AArch64::BI_InterlockedCompareExchange16_nf:
  case clang::AArch64::BI_InterlockedCompareExchange_nf:
  case clang::AArch64::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case clang::AArch64::BI_InterlockedCompareExchange128:
    return MSVCIntrin::_InterlockedCompareExchange128;
  case clang::AArch64::BI_InterlockedCompareExchange128_acq:
    return MSVCIntrin::_InterlockedCompareExchange128_acq;
  case clang::AArch64::BI_InterlockedCompareExchange128_nf:
    return MSVCIntrin::_InterlockedCompareExchange128_nf;
  case clang::AArch64::BI_InterlockedCompareExchange128_rel:
    return MSVCIntrin::_InterlockedCompareExchange128_rel;
  case clang::AArch64::BI_InterlockedOr8_acq:
  case clang::AArch64::BI_InterlockedOr16_acq:
  case clang::AArch64::BI_InterlockedOr_acq:
  case clang::AArch64::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case clang::AArch64::BI_InterlockedOr8_rel:
  case clang::AArch64::BI_InterlockedOr16_rel:
  case clang::AArch64::BI_InterlockedOr_rel:
  case clang::AArch64::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case clang::AArch64::BI_InterlockedOr8_nf:
  case clang::AArch64::BI_InterlockedOr16_nf:
  case clang::AArch64::BI_InterlockedOr_nf:
  case clang::AArch64::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case clang::AArch64::BI_InterlockedXor8_acq:
  case clang::AArch64::BI_InterlockedXor16_acq:
  case clang::AArch64::BI_InterlockedXor_acq:
  case clang::AArch64::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case clang::AArch64::BI_InterlockedXor8_rel:
  case clang::AArch64::BI_InterlockedXor16_rel:
  case clang::AArch64::BI_InterlockedXor_rel:
  case clang::AArch64::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case clang::AArch64::BI_InterlockedXor8_nf:
  case clang::AArch64::BI_InterlockedXor16_nf:
  case clang::AArch64::BI_InterlockedXor_nf:
  case clang::AArch64::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case clang::AArch64::BI_InterlockedAnd8_acq:
  case clang::AArch64::BI_InterlockedAnd16_acq:
  case clang::AArch64::BI_InterlockedAnd_acq:
  case clang::AArch64::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case clang::AArch64::BI_InterlockedAnd8_rel:
  case clang::AArch64::BI_InterlockedAnd16_rel:
  case clang::AArch64::BI_InterlockedAnd_rel:
  case clang::AArch64::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case clang::AArch64::BI_InterlockedAnd8_nf:
  case clang::AArch64::BI_InterlockedAnd16_nf:
  case clang::AArch64::BI_InterlockedAnd_nf:
  case clang::AArch64::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case clang::AArch64::BI_InterlockedIncrement16_acq:
  case clang::AArch64::BI_InterlockedIncrement_acq:
  case clang::AArch64::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case clang::AArch64::BI_InterlockedIncrement16_rel:
  case clang::AArch64::BI_InterlockedIncrement_rel:
  case clang::AArch64::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case clang::AArch64::BI_InterlockedIncrement16_nf:
  case clang::AArch64::BI_InterlockedIncrement_nf:
  case clang::AArch64::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case clang::AArch64::BI_InterlockedDecrement16_acq:
  case clang::AArch64::BI_InterlockedDecrement_acq:
  case clang::AArch64::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case clang::AArch64::BI_InterlockedDecrement16_rel:
  case clang::AArch64::BI_InterlockedDecrement_rel:
  case clang::AArch64::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case clang::AArch64::BI_InterlockedDecrement16_nf:
  case clang::AArch64::BI_InterlockedDecrement_nf:
  case clang::AArch64::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}
static std::optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return std::nullopt;
  case clang::X86::BI_BitScanForward:
  case clang::X86::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::X86::BI_BitScanReverse:
  case clang::X86::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::X86::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::X86::BI_InterlockedCompareExchange128:
    return MSVCIntrin::_InterlockedCompareExchange128;
  case clang::X86::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::X86::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::X86::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::X86::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::X86::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::X86::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::X86::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  }
  llvm_unreachable("must return from switch");
}
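
// A minimal sketch of how a target-specific emitter is expected to consult
// these translation tables (illustrative only; the callers live elsewhere in
// this file and may differ in detail):
//
//   if (std::optional<CodeGenFunction::MSVCIntrin> MsvcIntId =
//           translateX86ToMsvcIntrin(BuiltinID))
//     return EmitMSVCBuiltinExpr(*MsvcIntId, E);
//
// i.e. a target builtin ID is first mapped onto the target-independent
// MSVCIntrin enum, and the shared EmitMSVCBuiltinExpr below does the actual
// IR emission.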
// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
    Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType = IndexAddress.getElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange128:
    return EmitAtomicCmpXchg128ForMSIntrin(
        *this, E, AtomicOrdering::SequentiallyConsistent);
  case MSVCIntrin::_InterlockedCompareExchange128_acq:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange128_rel:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange128_nf:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
      break;
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}
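
// For example, given
//
//   long v = _InterlockedExchangeAdd_acq(&counter, 1);
//
// the _InterlockedExchangeAdd_acq case above emits, roughly,
//
//   %old = atomicrmw add ptr %counter, i32 1 acquire
//
// i.e. the MSVC memory-order suffixes (_acq/_rel/_nf) map onto the acquire,
// release and monotonic atomic orderings. This is an illustrative sketch of
// the lowering, not verified compiler output.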
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
         "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin))
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            std::nullopt);
  return ArgValue;
}
static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
  return CGF.Builder.CreateBinaryIntrinsic(
      Intrinsic::abs, ArgValue,
      ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
}

static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
                                     bool SanitizeOverflow) {
  Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));

  // Try to eliminate overflow check.
  if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
    if (!VCI->isMinSignedValue())
      return EmitAbs(CGF, ArgValue, true);
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  Constant *Zero = Constant::getNullValue(ArgValue->getType());
  Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
      Intrinsic::ssub_with_overflow, Zero, ArgValue);
  Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
  Value *NotOverflow = CGF.Builder.CreateNot(
      CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));

  // TODO: support -ftrapv-handler.
  if (SanitizeOverflow) {
    CGF.EmitCheck({{NotOverflow, SanitizerKind::SignedIntegerOverflow}},
                  SanitizerHandler::NegateOverflow,
                  {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
                   CGF.EmitCheckTypeDescriptor(E->getType())},
                  {ArgValue});
  } else
    CGF.EmitTrapCheck(NotOverflow, SanitizerHandler::SubOverflow);

  Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
  return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
}
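
// A small worked example of the lowering above (a sketch assuming a 32-bit
// int; value names are illustrative):
//
//   __builtin_abs(x) with overflow checking enabled becomes roughly
//
//     %pair = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 0, i32 %x)
//     %neg  = extractvalue %pair, 0
//     %ok   = not (extractvalue %pair, 1)     ; check/trap if overflow
//     %res  = select (%x < 0), %neg, %x
//
// The only input that can overflow is INT_MIN, which is why a non-INT_MIN
// constant argument skips the check entirely.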
/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}
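
// For example, getOSLogArgType(Ctx, 4) yields the canonical unsigned 32-bit
// integer type (and Size == 8 the unsigned 64-bit one, whatever the target
// spells it); every os_log argument is handed to the helper below as a plain
// unsigned integer of its encoded size.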
llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  CharUnits Offset;
  Address BufAddr =
      Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
              BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Addr.withElementType(Arg.getElementType());
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}
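
// To illustrate the mangling above (numbers are hypothetical): a buffer that
// is 8-byte aligned, with summary byte 2 and a single 4-byte item with
// descriptor 0, gets a helper named something like
//
//   __os_log_helper_8_2_1_4_0
//
// taking (void *buffer, uint32_t arg0) and writing the summary byte, the
// argument-count byte, and then a (descriptor, size, data) triple for each
// item into the buffer.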
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // If a temporary object that requires destruction after the full
      // expression is passed, push a lifetime-extended cleanup to extend its
      // lifetime to the end of the enclosing block scope.
      auto LifetimeExtendObject = [&](const Expr *E) {
        E = E->IgnoreParenCasts();
        // Extend lifetimes of objects returned by function calls and message
        // sends.

        // FIXME: We should do this in other cases in which temporaries are
        //        created including arguments of non-ARC types (e.g., C++
        //        temporaries).
        if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
          return true;
        return false;
      };

      if (TheExpr->getType()->isObjCRetainableType() &&
          getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be an ObjC retainable type");
        if (!isa<Constant>(ArgVal)) {
          CleanupKind Cleanup = getARCCleanupKind();
          QualType Ty = TheExpr->getType();
          Address Alloca = Address::invalid();
          Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
          ArgVal = EmitARCRetain(Ty, ArgVal);
          Builder.CreateStore(ArgVal, Addr);
          pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
                                      CodeGenFunction::destroyARCStrongPrecise,
                                      Cleanup & EHCleanup);

          // Push a clang.arc.use call to ensure ARC optimizer knows that the
          // argument has to be alive.
          if (CGM.getCodeGenOpts().OptimizationLevel != 0)
            pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
        }
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
  return RValue::get(BufAddr.getPointer());
}
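
// Rough shape of the result (a sketch, not verified compiler output):
//
//   __builtin_os_log_format(buf, "%d", x);
//
// evaluates 'x', then calls the lazily generated helper from above, e.g.
//
//   call void @__os_log_helper_<...>(ptr %buf, i32 %x)
//
// and returns the buffer pointer. The format string (argument 1) is not
// inspected here beyond the layout already computed by
// computeOSLogBufferLayout.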
static bool isSpecialUnsignedMultiplySignedResult(
    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
    WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
         !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
}

static RValue EmitCheckedUnsignedMultiplySignedResult(
    CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
    const clang::Expr *Op2, WidthAndSignedness Op2Info,
    const clang::Expr *ResultArg, QualType ResultQTy,
    WidthAndSignedness ResultInfo) {
  assert(isSpecialUnsignedMultiplySignedResult(
             Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
         "Cannot specialize this multiply");

  llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
  llvm::Value *V2 = CGF.EmitScalarExpr(Op2);

  llvm::Value *HasOverflow;
  llvm::Value *Result = EmitOverflowIntrinsic(
      CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);

  // The intrinsic call will detect overflow when the value is > UINT_MAX,
  // however, since the original builtin had a signed result, we need to report
  // an overflow when the result is greater than INT_MAX.
  auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
  llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);

  llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
  HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(HasOverflow);
}
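
// Concretely (an illustrative sketch): for
//
//   unsigned a, b; int r;
//   bool ovf = __builtin_mul_overflow(a, b, &r);
//
// all three widths match but only the result is signed, so the code above
// emits one @llvm.umul.with.overflow.i32 call and ORs its overflow bit with
// an extra 'icmp ugt' against INT_MAX, since any product above INT_MAX does
// not fit in the signed result even though the unsigned multiply itself did
// not wrap.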
/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
         Op1Info.Signed != Op2Info.Signed;
}

/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multiplication we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = ResultPtr.getElementType();
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or lesser
    // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}
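
// For example (a sketch of the intent, not verified output): for
//
//   long s; unsigned long u; long r;
//   bool ovf = __builtin_mul_overflow(s, u, &r);
//
// the signed operand is replaced by |s|, a single
// @llvm.umul.with.overflow.i64 is emitted on |s| and u, and the product is
// conditionally negated when s was negative; the overflow bit is then widened
// to also cover the "does not fit in the (possibly narrower or signed)
// result" cases computed above.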
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  llvm::SmallPtrSet<const Decl *, 16> Seen;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}
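
// As an example of the predicate (illustrative): under
// -fstrict-vtable-pointers,
//
//   struct Base { virtual void f(); };
//   struct Holder { Base b; };        // contains a dynamic-class subobject
//   struct Plain { int x; };
//
// both Base and Holder require laundering, so __builtin_launder on pointers
// to them is lowered via llvm.launder.invariant.group, while Plain does not
// and the builtin collapses to a plain copy of the pointer.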
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
  llvm::Value *Src = EmitScalarExpr(E->getArg(0));
  llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));

  // The builtin's shift arg may have a different type than the source arg and
  // result, but the LLVM intrinsic uses the same type for all values.
  llvm::Type *Ty = Src->getType();
  ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);

  // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
  unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}
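
// For instance, __builtin_rotateleft32(x, n) is emitted as
//
//   call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
//
// and the rotate-right builtins use @llvm.fshr in the same way; passing the
// source operand twice is what turns the general funnel shift into a rotate.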
// Map math builtins for long-double to f128 version.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
#define MUTATE_LDBL(func) \
  case Builtin::BI__builtin_##func##l: \
    return Builtin::BI__builtin_##func##f128;
  MUTATE_LDBL(nearbyint)
  MUTATE_LDBL(llround)
  MUTATE_LDBL(huge_val)
  MUTATE_LDBL(copysign)
  MUTATE_LDBL(nextafter)
  MUTATE_LDBL(nexttoward)
  MUTATE_LDBL(remainder)
  MUTATE_LDBL(scalbln)
#undef MUTATE_LDBL
  default:
    return BuiltinID;
  }
}
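
// Usage note (illustrative): on a PPC64 target whose long double is IEEE
// quad, a call such as __builtin_copysignl(x, y) is remapped by this table to
// BI__builtin_copysignf128 before emission, so it lowers the same way as the
// explicit f128 builtin; builtins without an entry are returned unchanged.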
static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
                               Value *V) {
  if (CGF.Builder.getIsFPConstrained() &&
      CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
    if (Value *Result =
            CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
      return Result;
  }
  return nullptr;
}

static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
                                              const FunctionDecl *FD) {
  auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
  auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
  auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);

  SmallVector<Value *, 16> Args;
  for (auto &&FormalTy : FnTy->params())
    Args.push_back(llvm::PoisonValue::get(FormalTy));

  return RValue::get(CGF->Builder.CreateCall(UBF, Args));
}
CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD
, unsigned BuiltinID
,
2350 ReturnValueSlot ReturnValue
) {
2351 const FunctionDecl
*FD
= GD
.getDecl()->getAsFunction();
2352 // See if we can constant fold this builtin. If so, don't emit it at all.
2353 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2354 Expr::EvalResult Result
;
2355 if (E
->isPRValue() && E
->EvaluateAsRValue(Result
, CGM
.getContext()) &&
2356 !Result
.hasSideEffects()) {
2357 if (Result
.Val
.isInt())
2358 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2359 Result
.Val
.getInt()));
2360 if (Result
.Val
.isFloat())
2361 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2362 Result
.Val
.getFloat()));
2365 // If current long-double semantics is IEEE 128-bit, replace math builtins
2366 // of long-double with f128 equivalent.
2367 // TODO: This mutation should also be applied to other targets other than PPC,
2368 // after backend supports IEEE 128-bit style libcalls.
2369 if (getTarget().getTriple().isPPC64() &&
2370 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2371 BuiltinID
= mutateLongDoubleBuiltin(BuiltinID
);
2373 // If the builtin has been declared explicitly with an assembler label,
2374 // disable the specialized emitting below. Ideally we should communicate the
2375 // rename in IR, or at least avoid generating the intrinsic calls that are
2376 // likely to get lowered to the renamed library functions.
2377 const unsigned BuiltinIDIfNoAsmLabel
=
2378 FD
->hasAttr
<AsmLabelAttr
>() ? 0 : BuiltinID
;
2380 std::optional
<bool> ErrnoOverriden
;
2381 // ErrnoOverriden is true if math-errno is overriden via the
2382 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2383 // which implies math-errno.
2384 if (E
->hasStoredFPFeatures()) {
2385 FPOptionsOverride OP
= E
->getFPFeatures();
2386 if (OP
.hasMathErrnoOverride())
2387 ErrnoOverriden
= OP
.getMathErrnoOverride();
2389 // True if 'atttibute__((optnone)) is used. This attibute overrides
2390 // fast-math which implies math-errno.
2391 bool OptNone
= CurFuncDecl
&& CurFuncDecl
->hasAttr
<OptimizeNoneAttr
>();
2393 // True if we are compiling at -O2 and errno has been disabled
2394 // using the '#pragma float_control(precise, off)', and
2395 // attribute opt-none hasn't been seen.
2396 bool ErrnoOverridenToFalseWithOpt
=
2397 ErrnoOverriden
.has_value() && !ErrnoOverriden
.value() && !OptNone
&&
2398 CGM
.getCodeGenOpts().OptimizationLevel
!= 0;
2400 // There are LLVM math intrinsics/instructions corresponding to math library
2401 // functions except the LLVM op will never set errno while the math library
2402 // might. Also, math builtins have the same semantics as their math library
2403 // twins. Thus, we can transform math library and builtin calls to their
2404 // LLVM counterparts if the call is marked 'const' (known to never set errno).
2405 // In case FP exceptions are enabled, the experimental versions of the
2406 // intrinsics model those.
2408 getContext().BuiltinInfo
.isConst(BuiltinID
);
2410 // There's a special case with the fma builtins where they are always const
2411 // if the target environment is GNU or the target is OS is Windows and we're
2412 // targeting the MSVCRT.dll environment.
2413 // FIXME: This list can be become outdated. Need to find a way to get it some
2415 switch (BuiltinID
) {
2416 case Builtin::BI__builtin_fma
:
2417 case Builtin::BI__builtin_fmaf
:
2418 case Builtin::BI__builtin_fmal
:
2419 case Builtin::BIfma
:
2420 case Builtin::BIfmaf
:
2421 case Builtin::BIfmal
: {
2422 auto &Trip
= CGM
.getTriple();
2423 if (Trip
.isGNUEnvironment() || Trip
.isOSMSVCRT())
2431 bool ConstWithoutErrnoAndExceptions
=
2432 getContext().BuiltinInfo
.isConstWithoutErrnoAndExceptions(BuiltinID
);
2433 bool ConstWithoutExceptions
=
2434 getContext().BuiltinInfo
.isConstWithoutExceptions(BuiltinID
);
2436 // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is
2438 // Math intrinsics are generated only when math-errno is disabled. Any pragmas
2439 // or attributes that affect math-errno should prevent or allow math
2440 // intrincs to be generated. Intrinsics are generated:
2441 // 1- In fast math mode, unless math-errno is overriden
2442 // via '#pragma float_control(precise, on)', or via an
2443 // 'attribute__((optnone))'.
2444 // 2- If math-errno was enabled on command line but overriden
2445 // to false via '#pragma float_control(precise, off))' and
2446 // 'attribute__((optnone))' hasn't been used.
2447 // 3- If we are compiling with optimization and errno has been disabled
2448 // via '#pragma float_control(precise, off)', and
2449 // 'attribute__((optnone))' hasn't been used.
2451 bool ConstWithoutErrnoOrExceptions
=
2452 ConstWithoutErrnoAndExceptions
|| ConstWithoutExceptions
;
2453 bool GenerateIntrinsics
=
2454 (ConstAlways
&& !OptNone
) ||
2455 (!getLangOpts().MathErrno
&&
2456 !(ErrnoOverriden
.has_value() && ErrnoOverriden
.value()) && !OptNone
);
2457 if (!GenerateIntrinsics
) {
2458 GenerateIntrinsics
=
2459 ConstWithoutErrnoOrExceptions
&& !ConstWithoutErrnoAndExceptions
;
2460 if (!GenerateIntrinsics
)
2461 GenerateIntrinsics
=
2462 ConstWithoutErrnoOrExceptions
&&
2463 (!getLangOpts().MathErrno
&&
2464 !(ErrnoOverriden
.has_value() && ErrnoOverriden
.value()) && !OptNone
);
2465 if (!GenerateIntrinsics
)
2466 GenerateIntrinsics
=
2467 ConstWithoutErrnoOrExceptions
&& ErrnoOverridenToFalseWithOpt
;
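  // For instance (illustrative, not exhaustive): compiling 'cos(x)' with
  // -fno-math-errno and no overriding pragma or optnone attribute satisfies
  // the second clause above, so the switch below emits
  // 'call double @llvm.cos.f64', whereas with math-errno in effect the call
  // falls through to the generic path and stays a libm call to 'cos'.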
  if (GenerateIntrinsics) {
    switch (BuiltinIDIfNoAsmLabel) {
    case Builtin::BIceil:
    case Builtin::BIceilf:
    case Builtin::BIceill:
    case Builtin::BI__builtin_ceil:
    case Builtin::BI__builtin_ceilf:
    case Builtin::BI__builtin_ceilf16:
    case Builtin::BI__builtin_ceill:
    case Builtin::BI__builtin_ceilf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::ceil,
          Intrinsic::experimental_constrained_ceil));

    case Builtin::BIcopysign:
    case Builtin::BIcopysignf:
    case Builtin::BIcopysignl:
    case Builtin::BI__builtin_copysign:
    case Builtin::BI__builtin_copysignf:
    case Builtin::BI__builtin_copysignf16:
    case Builtin::BI__builtin_copysignl:
    case Builtin::BI__builtin_copysignf128:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));

    case Builtin::BIcos:
    case Builtin::BIcosf:
    case Builtin::BIcosl:
    case Builtin::BI__builtin_cos:
    case Builtin::BI__builtin_cosf:
    case Builtin::BI__builtin_cosf16:
    case Builtin::BI__builtin_cosl:
    case Builtin::BI__builtin_cosf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::cos,
          Intrinsic::experimental_constrained_cos));

    case Builtin::BIexp:
    case Builtin::BIexpf:
    case Builtin::BIexpl:
    case Builtin::BI__builtin_exp:
    case Builtin::BI__builtin_expf:
    case Builtin::BI__builtin_expf16:
    case Builtin::BI__builtin_expl:
    case Builtin::BI__builtin_expf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::exp,
          Intrinsic::experimental_constrained_exp));

    case Builtin::BIexp2:
    case Builtin::BIexp2f:
    case Builtin::BIexp2l:
    case Builtin::BI__builtin_exp2:
    case Builtin::BI__builtin_exp2f:
    case Builtin::BI__builtin_exp2f16:
    case Builtin::BI__builtin_exp2l:
    case Builtin::BI__builtin_exp2f128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::exp2,
          Intrinsic::experimental_constrained_exp2));
    case Builtin::BI__builtin_exp10:
    case Builtin::BI__builtin_exp10f:
    case Builtin::BI__builtin_exp10f16:
    case Builtin::BI__builtin_exp10l:
    case Builtin::BI__builtin_exp10f128: {
      // TODO: strictfp support
      if (Builder.getIsFPConstrained())
        break;
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp10));
    }
    case Builtin::BIfabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabsl:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabsf16:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_fabsf128:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));

    case Builtin::BIfloor:
    case Builtin::BIfloorf:
    case Builtin::BIfloorl:
    case Builtin::BI__builtin_floor:
    case Builtin::BI__builtin_floorf:
    case Builtin::BI__builtin_floorf16:
    case Builtin::BI__builtin_floorl:
    case Builtin::BI__builtin_floorf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::floor,
          Intrinsic::experimental_constrained_floor));

    case Builtin::BIfma:
    case Builtin::BIfmaf:
    case Builtin::BIfmal:
    case Builtin::BI__builtin_fma:
    case Builtin::BI__builtin_fmaf:
    case Builtin::BI__builtin_fmaf16:
    case Builtin::BI__builtin_fmal:
    case Builtin::BI__builtin_fmaf128:
      return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::fma,
          Intrinsic::experimental_constrained_fma));

    case Builtin::BIfmax:
    case Builtin::BIfmaxf:
    case Builtin::BIfmaxl:
    case Builtin::BI__builtin_fmax:
    case Builtin::BI__builtin_fmaxf:
    case Builtin::BI__builtin_fmaxf16:
    case Builtin::BI__builtin_fmaxl:
    case Builtin::BI__builtin_fmaxf128:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::maxnum,
          Intrinsic::experimental_constrained_maxnum));

    case Builtin::BIfmin:
    case Builtin::BIfminf:
    case Builtin::BIfminl:
    case Builtin::BI__builtin_fmin:
    case Builtin::BI__builtin_fminf:
    case Builtin::BI__builtin_fminf16:
    case Builtin::BI__builtin_fminl:
    case Builtin::BI__builtin_fminf128:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::minnum,
          Intrinsic::experimental_constrained_minnum));

    // fmod() is a special-case. It maps to the frem instruction rather than an
    // LLVM intrinsic.
    case Builtin::BIfmod:
    case Builtin::BIfmodf:
    case Builtin::BIfmodl:
    case Builtin::BI__builtin_fmod:
    case Builtin::BI__builtin_fmodf:
    case Builtin::BI__builtin_fmodf16:
    case Builtin::BI__builtin_fmodl:
    case Builtin::BI__builtin_fmodf128: {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
      Value *Arg1 = EmitScalarExpr(E->getArg(0));
      Value *Arg2 = EmitScalarExpr(E->getArg(1));
      return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
    }
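    // For the fmod family above, e.g. fmod(x, y), the emitted IR is simply
    //
    //   %fmod = frem double %x, %y
    //
    // since frem already has libm fmod value semantics and never sets errno,
    // which is exactly why this path is only taken when errno handling is not
    // required.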
    case Builtin::BIlog:
    case Builtin::BIlogf:
    case Builtin::BIlogl:
    case Builtin::BI__builtin_log:
    case Builtin::BI__builtin_logf:
    case Builtin::BI__builtin_logf16:
    case Builtin::BI__builtin_logl:
    case Builtin::BI__builtin_logf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::log,
          Intrinsic::experimental_constrained_log));

    case Builtin::BIlog10:
    case Builtin::BIlog10f:
    case Builtin::BIlog10l:
    case Builtin::BI__builtin_log10:
    case Builtin::BI__builtin_log10f:
    case Builtin::BI__builtin_log10f16:
    case Builtin::BI__builtin_log10l:
    case Builtin::BI__builtin_log10f128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::log10,
          Intrinsic::experimental_constrained_log10));

    case Builtin::BIlog2:
    case Builtin::BIlog2f:
    case Builtin::BIlog2l:
    case Builtin::BI__builtin_log2:
    case Builtin::BI__builtin_log2f:
    case Builtin::BI__builtin_log2f16:
    case Builtin::BI__builtin_log2l:
    case Builtin::BI__builtin_log2f128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::log2,
          Intrinsic::experimental_constrained_log2));

    case Builtin::BInearbyint:
    case Builtin::BInearbyintf:
    case Builtin::BInearbyintl:
    case Builtin::BI__builtin_nearbyint:
    case Builtin::BI__builtin_nearbyintf:
    case Builtin::BI__builtin_nearbyintl:
    case Builtin::BI__builtin_nearbyintf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::nearbyint,
          Intrinsic::experimental_constrained_nearbyint));

    case Builtin::BIpow:
    case Builtin::BIpowf:
    case Builtin::BIpowl:
    case Builtin::BI__builtin_pow:
    case Builtin::BI__builtin_powf:
    case Builtin::BI__builtin_powf16:
    case Builtin::BI__builtin_powl:
    case Builtin::BI__builtin_powf128:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::pow,
          Intrinsic::experimental_constrained_pow));

    case Builtin::BIrint:
    case Builtin::BIrintf:
    case Builtin::BIrintl:
    case Builtin::BI__builtin_rint:
    case Builtin::BI__builtin_rintf:
    case Builtin::BI__builtin_rintf16:
    case Builtin::BI__builtin_rintl:
    case Builtin::BI__builtin_rintf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::rint,
          Intrinsic::experimental_constrained_rint));

    case Builtin::BIround:
    case Builtin::BIroundf:
    case Builtin::BIroundl:
    case Builtin::BI__builtin_round:
    case Builtin::BI__builtin_roundf:
    case Builtin::BI__builtin_roundf16:
    case Builtin::BI__builtin_roundl:
    case Builtin::BI__builtin_roundf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::round,
          Intrinsic::experimental_constrained_round));

    case Builtin::BIroundeven:
    case Builtin::BIroundevenf:
    case Builtin::BIroundevenl:
    case Builtin::BI__builtin_roundeven:
    case Builtin::BI__builtin_roundevenf:
    case Builtin::BI__builtin_roundevenf16:
    case Builtin::BI__builtin_roundevenl:
    case Builtin::BI__builtin_roundevenf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::roundeven,
          Intrinsic::experimental_constrained_roundeven));

    case Builtin::BIsin:
    case Builtin::BIsinf:
    case Builtin::BIsinl:
    case Builtin::BI__builtin_sin:
    case Builtin::BI__builtin_sinf:
    case Builtin::BI__builtin_sinf16:
    case Builtin::BI__builtin_sinl:
    case Builtin::BI__builtin_sinf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::sin,
          Intrinsic::experimental_constrained_sin));

    case Builtin::BIsqrt:
    case Builtin::BIsqrtf:
    case Builtin::BIsqrtl:
    case Builtin::BI__builtin_sqrt:
    case Builtin::BI__builtin_sqrtf:
    case Builtin::BI__builtin_sqrtf16:
    case Builtin::BI__builtin_sqrtl:
    case Builtin::BI__builtin_sqrtf128:
    case Builtin::BI__builtin_elementwise_sqrt: {
      llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
      SetSqrtFPAccuracy(Call);
      return RValue::get(Call);
    }
    case Builtin::BItrunc:
    case Builtin::BItruncf:
    case Builtin::BItruncl:
    case Builtin::BI__builtin_trunc:
    case Builtin::BI__builtin_truncf:
    case Builtin::BI__builtin_truncf16:
    case Builtin::BI__builtin_truncl:
    case Builtin::BI__builtin_truncf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::trunc,
          Intrinsic::experimental_constrained_trunc));

    case Builtin::BIlround:
    case Builtin::BIlroundf:
    case Builtin::BIlroundl:
    case Builtin::BI__builtin_lround:
    case Builtin::BI__builtin_lroundf:
    case Builtin::BI__builtin_lroundl:
    case Builtin::BI__builtin_lroundf128:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::lround,
          Intrinsic::experimental_constrained_lround));

    case Builtin::BIllround:
    case Builtin::BIllroundf:
    case Builtin::BIllroundl:
    case Builtin::BI__builtin_llround:
    case Builtin::BI__builtin_llroundf:
    case Builtin::BI__builtin_llroundl:
    case Builtin::BI__builtin_llroundf128:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::llround,
          Intrinsic::experimental_constrained_llround));

    case Builtin::BIlrint:
    case Builtin::BIlrintf:
    case Builtin::BIlrintl:
    case Builtin::BI__builtin_lrint:
    case Builtin::BI__builtin_lrintf:
    case Builtin::BI__builtin_lrintl:
    case Builtin::BI__builtin_lrintf128:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::lrint,
          Intrinsic::experimental_constrained_lrint));

    case Builtin::BIllrint:
    case Builtin::BIllrintf:
    case Builtin::BIllrintl:
    case Builtin::BI__builtin_llrint:
    case Builtin::BI__builtin_llrintf:
    case Builtin::BI__builtin_llrintl:
    case Builtin::BI__builtin_llrintf128:
      return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
          *this, E, Intrinsic::llrint,
          Intrinsic::experimental_constrained_llrint));
    case Builtin::BI__builtin_ldexp:
    case Builtin::BI__builtin_ldexpf:
    case Builtin::BI__builtin_ldexpl:
    case Builtin::BI__builtin_ldexpf16:
    case Builtin::BI__builtin_ldexpf128: {
      return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
          *this, E, Intrinsic::ldexp,
          Intrinsic::experimental_constrained_ldexp));
    }
    default:
      break;
    }
  }
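  // Note on the constrained variants used above (a sketch): under
  // '#pragma STDC FENV_ACCESS ON' the builder is in FP-constrained mode, so a
  // call like __builtin_ceil(x) is emitted as something along the lines of
  //
  //   call double @llvm.experimental.constrained.ceil.f64(
  //       double %x, metadata !"fpexcept.strict")
  //
  // instead of plain @llvm.ceil.f64, keeping the FP-exception semantics
  // visible to the optimizer.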
  // Check NonnullAttribute/NullabilityArg and Alignment.
  auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
                          unsigned ParmNum) {
    Value *Val = A.getPointer();
    EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
                        ParmNum);

    if (SanOpts.has(SanitizerKind::Alignment) && ClSanitizeAlignmentBuiltin) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::All);
      SkippedChecks.clear(SanitizerKind::Alignment);
      SourceLocation Loc = Arg->getExprLoc();
      // Strip an implicit cast.
      if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
        if (CE->getCastKind() == CK_BitCast)
          Arg = CE->getSubExpr();
      EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
                    SkippedChecks);
    }
  };
) {
2825 case Builtin::BI__builtin___CFStringMakeConstantString
:
2826 case Builtin::BI__builtin___NSStringMakeConstantString
:
2827 return RValue::get(ConstantEmitter(*this).emitAbstract(E
, E
->getType()));
2828 case Builtin::BI__builtin_stdarg_start
:
2829 case Builtin::BI__builtin_va_start
:
2830 case Builtin::BI__va_start
:
2831 case Builtin::BI__builtin_va_end
:
2832 EmitVAStartEnd(BuiltinID
== Builtin::BI__va_start
2833 ? EmitScalarExpr(E
->getArg(0))
2834 : EmitVAListRef(E
->getArg(0)).getPointer(),
2835 BuiltinID
!= Builtin::BI__builtin_va_end
);
2836 return RValue::get(nullptr);
2837 case Builtin::BI__builtin_va_copy
: {
2838 Value
*DstPtr
= EmitVAListRef(E
->getArg(0)).getPointer();
2839 Value
*SrcPtr
= EmitVAListRef(E
->getArg(1)).getPointer();
2840 Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::vacopy
), {DstPtr
, SrcPtr
});
2841 return RValue::get(nullptr);
2843 case Builtin::BIabs
:
2844 case Builtin::BIlabs
:
2845 case Builtin::BIllabs
:
2846 case Builtin::BI__builtin_abs
:
2847 case Builtin::BI__builtin_labs
:
2848 case Builtin::BI__builtin_llabs
: {
2849 bool SanitizeOverflow
= SanOpts
.has(SanitizerKind::SignedIntegerOverflow
);
2852 switch (getLangOpts().getSignedOverflowBehavior()) {
2853 case LangOptions::SOB_Defined
:
2854 Result
= EmitAbs(*this, EmitScalarExpr(E
->getArg(0)), false);
2856 case LangOptions::SOB_Undefined
:
2857 if (!SanitizeOverflow
) {
2858 Result
= EmitAbs(*this, EmitScalarExpr(E
->getArg(0)), true);
2862 case LangOptions::SOB_Trapping
:
2863 // TODO: Somehow handle the corner case when the address of abs is taken.
2864 Result
= EmitOverflowCheckedAbs(*this, E
, SanitizeOverflow
);
2867 return RValue::get(Result
);
2869 case Builtin::BI__builtin_complex
: {
2870 Value
*Real
= EmitScalarExpr(E
->getArg(0));
2871 Value
*Imag
= EmitScalarExpr(E
->getArg(1));
2872 return RValue::getComplex({Real
, Imag
});
2874 case Builtin::BI__builtin_conj
:
2875 case Builtin::BI__builtin_conjf
:
2876 case Builtin::BI__builtin_conjl
:
2877 case Builtin::BIconj
:
2878 case Builtin::BIconjf
:
2879 case Builtin::BIconjl
: {
2880 ComplexPairTy ComplexVal
= EmitComplexExpr(E
->getArg(0));
2881 Value
*Real
= ComplexVal
.first
;
2882 Value
*Imag
= ComplexVal
.second
;
2883 Imag
= Builder
.CreateFNeg(Imag
, "neg");
2884 return RValue::getComplex(std::make_pair(Real
, Imag
));
2886 case Builtin::BI__builtin_creal
:
2887 case Builtin::BI__builtin_crealf
:
2888 case Builtin::BI__builtin_creall
:
2889 case Builtin::BIcreal
:
2890 case Builtin::BIcrealf
:
2891 case Builtin::BIcreall
: {
2892 ComplexPairTy ComplexVal
= EmitComplexExpr(E
->getArg(0));
2893 return RValue::get(ComplexVal
.first
);
2896 case Builtin::BI__builtin_preserve_access_index
: {
2897 // Only enabled preserved access index region when debuginfo
2898 // is available as debuginfo is needed to preserve user-level
2900 if (!getDebugInfo()) {
2901 CGM
.Error(E
->getExprLoc(), "using builtin_preserve_access_index() without -g");
2902 return RValue::get(EmitScalarExpr(E
->getArg(0)));
2905 // Nested builtin_preserve_access_index() not supported
2906 if (IsInPreservedAIRegion
) {
2907 CGM
.Error(E
->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2908 return RValue::get(EmitScalarExpr(E
->getArg(0)));
2911 IsInPreservedAIRegion
= true;
2912 Value
*Res
= EmitScalarExpr(E
->getArg(0));
2913 IsInPreservedAIRegion
= false;
2914 return RValue::get(Res
);
  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
    Value *Inverse = Builder.CreateNot(ArgValue, "not");
    Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
    Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
    Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
    Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                   "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp =
        Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
                          llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
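  // Illustrative lowering, assuming a 32-bit int target: __builtin_clz(x)
  // becomes "call i32 @llvm.ctlz.i32(i32 %x, i1 <zero-is-undef>)" where the
  // i1 flag comes from isCLZForZeroUndef(), and __builtin_ffs(x) becomes
  // select(x == 0, 0, cttz(x, true) + 1), as sketched in the comment above.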
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__lzcnt16:
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__popcnt16:
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
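  // E.g. __builtin_popcount(x) maps directly onto @llvm.ctpop.i32, and
  // __builtin_parity(x) is just "ctpop(x) & 1"; the IntCast above only fires
  // when the argument width differs from the result type (for instance
  // __builtin_popcountll returning int from an i64 ctpop).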
  case Builtin::BI__builtin_unpredictable: {
    // Always return the argument of __builtin_unpredictable. LLVM does not
    // handle this builtin. Metadata for this builtin should be added directly
    // to instructions such as branches or switches that use it.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return RValue::get(ArgValue);

    Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *Result =
        Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect_with_probability: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    llvm::APFloat Probability(0.0);
    const Expr *ProbArg = E->getArg(2);
    bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
    assert(EvalSucceed && "probability should be able to evaluate as float");
    (void)EvalSucceed;
    bool LoseInfo = false;
    Probability.convert(llvm::APFloat::IEEEdouble(),
                        llvm::RoundingMode::Dynamic, &LoseInfo);
    llvm::Type *Ty = ConvertType(ProbArg->getType());
    Constant *Confidence = ConstantFP::get(Ty, Probability);
    // Don't generate llvm.expect.with.probability on -O0 as the backend
    // won't use it for anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return RValue::get(ArgValue);

    Function *FnExpect =
        CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
    Value *Result = Builder.CreateCall(
        FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
    return RValue::get(Result);
  }
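  // Illustrative IR at -O1 and above: __builtin_expect(x, 1) becomes
  // "%expval = call i64 @llvm.expect.i64(i64 %x, i64 1)"; at -O0 the call is
  // skipped entirely and the plain argument value is returned.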
  case Builtin::BI__builtin_assume_aligned: {
    const Expr *Ptr = E->getArg(0);
    Value *PtrValue = EmitScalarExpr(Ptr);
    Value *OffsetValue =
        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;

    Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
    if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
      AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
                                     llvm::Value::MaximumAlignment);

    emitAlignmentAssumption(PtrValue, Ptr,
                            /*The expr loc is sufficient.*/ SourceLocation(),
                            AlignmentCI, OffsetValue);
    return RValue::get(PtrValue);
  }
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume: {
    if (E->getArg(0)->HasSideEffects(getContext()))
      return RValue::get(nullptr);

    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    Builder.CreateCall(FnAssume, ArgValue);
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_assume_separate_storage: {
    const Expr *Arg0 = E->getArg(0);
    const Expr *Arg1 = E->getArg(1);

    Value *Value0 = EmitScalarExpr(Arg0);
    Value *Value1 = EmitScalarExpr(Arg1);

    Value *Values[] = {Value0, Value1};
    OperandBundleDefT<Value *> OBD("separate_storage", Values);
    Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
    return RValue::get(nullptr);
  }
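  // All three forms above funnel into @llvm.assume: __builtin_assume(c)
  // emits "call void @llvm.assume(i1 %c)", __builtin_assume_aligned is
  // expected to attach an "align" operand bundle via emitAlignmentAssumption,
  // and the separate_storage form uses a "separate_storage" operand bundle
  // on an assume of the constant true.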
  case Builtin::BI__arithmetic_fence: {
    // Create the builtin call if FastMath is selected, and the target
    // supports the builtin, otherwise just return the argument.
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    llvm::FastMathFlags FMF = Builder.getFastMathFlags();
    bool isArithmeticFenceEnabled =
        FMF.allowReassoc() &&
        getContext().getTargetInfo().checkArithmeticFenceSupported();
    QualType ArgType = E->getArg(0)->getType();
    if (ArgType->isComplexType()) {
      if (isArithmeticFenceEnabled) {
        QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
        ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
        Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
                                                    ConvertType(ElementType));
        Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
                                                    ConvertType(ElementType));
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
      Value *Real = ComplexVal.first;
      Value *Imag = ComplexVal.second;
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    if (isArithmeticFenceEnabled)
      return RValue::get(
          Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
    return RValue::get(ArgValue);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64:
  case Builtin::BI_byteswap_ushort:
  case Builtin::BI_byteswap_ulong:
  case Builtin::BI_byteswap_uint64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
  }
  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
  }
  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    return emitRotate(E, false);

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    return emitRotate(E, true);
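  // emitRotate is expected to lower these to the funnel-shift intrinsics,
  // e.g. __builtin_rotateleft32(x, n) becomes roughly
  // "call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)" and the rotate-right
  // variants use @llvm.fshr.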
  case Builtin::BI__builtin_constant_p: {
    llvm::Type *ResultType = ConvertType(E->getType());

    const Expr *Arg = E->getArg(0);
    QualType ArgType = Arg->getType();
    // FIXME: The allowance for Obj-C pointers and block pointers is historical
    // and likely a mistake.
    if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
        !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
      // Per the GCC documentation, only numeric constants are recognized after
      // inlining.
      return RValue::get(ConstantInt::get(ResultType, 0));

    if (Arg->HasSideEffects(getContext()))
      // The argument is unevaluated, so be conservative if it might have
      // side-effects.
      return RValue::get(ConstantInt::get(ResultType, 0));

    Value *ArgValue = EmitScalarExpr(Arg);
    if (ArgType->isObjCObjectPointerType()) {
      // Convert Objective-C objects to id because we cannot distinguish between
      // LLVM types for Obj-C classes as they are opaque.
      ArgType = CGM.getContext().getObjCIdType();
      ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
    }
    Function *F =
        CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size: {
    unsigned Type =
        E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
    auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));

    // We pass this builtin onto the optimizer so that it can figure out the
    // object size in more complex cases.
    bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
    return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
                                             /*EmittedE=*/nullptr, IsDynamic));
  }
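  // Typical result: __builtin_object_size(p, 0) is emitted as something like
  // "call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)",
  // with the last i1 set for the dynamic variant; constant-foldable cases are
  // resolved later by the optimizer rather than here.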
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    Builder.CreateCall(F, {Address, RW, Locality, Data});
    return RValue::get(nullptr);
  }
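  // E.g. __builtin_prefetch(p) defaults to rw=0 (read) and locality=3 and
  // becomes "call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)";
  // the trailing 1 selects the data (as opposed to instruction) cache.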
  case Builtin::BI__builtin_readcyclecounter: {
    Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall(F, {Begin, End}));
  }
  case Builtin::BI__builtin_trap:
    EmitTrapCall(Intrinsic::trap);
    return RValue::get(nullptr);
  case Builtin::BI__debugbreak:
    EmitTrapCall(Intrinsic::debugtrap);
    return RValue::get(nullptr);
  case Builtin::BI__builtin_unreachable: {
    EmitUnreachable(E->getExprLoc());

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));

    if (Builder.getIsFPConstrained()) {
      // FIXME: llvm.powi has 2 mangling types,
      // llvm.experimental.constrained.powi has one.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
                                     Src0->getType());
      return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
    }

    Function *F = CGM.getIntrinsic(Intrinsic::powi,
                                   { Src0->getType(), Src1->getType() });
    return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
  }
  case Builtin::BI__builtin_frexp:
  case Builtin::BI__builtin_frexpf:
  case Builtin::BI__builtin_frexpl:
  case Builtin::BI__builtin_frexpf128:
  case Builtin::BI__builtin_frexpf16:
    return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isnan: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
      return RValue::get(Result);
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_issignaling: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
      return RValue::get(Result);
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
                           ConvertType(E->getType())));
  }

  case Builtin::BIfinite:
  case Builtin::BI__finite:
  case Builtin::BIfinitef:
  case Builtin::BI__finitef:
  case Builtin::BIfinitel:
  case Builtin::BI__finitel:
  case Builtin::BI__builtin_isfinite: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
      return RValue::get(Result);
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isnormal: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_issubnormal: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_iszero: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    return RValue::get(
        Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
                           ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfpclass: {
    Expr::EvalResult Result;
    if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
      break;
    uint64_t Test = Result.Val.getInt().getLimitedValue();
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *V = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
                                          ConvertType(E->getType())));
  }
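  // Each of the classification builtins above reduces to a single
  // @llvm.is.fpclass call, e.g. __builtin_isnan(x) tests the fcNan mask:
  // "%t = call i1 @llvm.is.fpclass.f64(double %x, i32 3)" followed by a zext
  // to the int result type.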
  case Builtin::BI__builtin_nondeterministic_value: {
    llvm::Type *Ty = ConvertType(E->getArg(0)->getType());

    Value *Result = PoisonValue::get(Ty);
    Result = Builder.CreateFreeze(Result);

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_elementwise_abs: {
    Value *Result;
    QualType QT = E->getArg(0)->getType();

    if (auto *VecTy = QT->getAs<VectorType>())
      QT = VecTy->getElementType();
    if (QT->isIntegerType())
      Result = Builder.CreateBinaryIntrinsic(
          llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
          Builder.getFalse(), nullptr, "elt.abs");
    else
      Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_elementwise_ceil:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
  case Builtin::BI__builtin_elementwise_exp:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
  case Builtin::BI__builtin_elementwise_exp2:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
  case Builtin::BI__builtin_elementwise_log:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
  case Builtin::BI__builtin_elementwise_log2:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
  case Builtin::BI__builtin_elementwise_log10:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
  case Builtin::BI__builtin_elementwise_pow: {
    return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
  }
  case Builtin::BI__builtin_elementwise_bitreverse:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::bitreverse,
                                        "elt.bitreverse"));
  case Builtin::BI__builtin_elementwise_cos:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
  case Builtin::BI__builtin_elementwise_floor:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
  case Builtin::BI__builtin_elementwise_roundeven:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
                                        "elt.roundeven"));
  case Builtin::BI__builtin_elementwise_round:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
                                        "elt.round"));
  case Builtin::BI__builtin_elementwise_rint:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
                                        "elt.rint"));
  case Builtin::BI__builtin_elementwise_nearbyint:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
                                        "elt.nearbyint"));
  case Builtin::BI__builtin_elementwise_sin:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));

  case Builtin::BI__builtin_elementwise_trunc:
    return RValue::get(
        emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
  case Builtin::BI__builtin_elementwise_canonicalize:
    return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize,
                                        "elt.canonicalize"));
  case Builtin::BI__builtin_elementwise_copysign:
    return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
  case Builtin::BI__builtin_elementwise_fma:
    return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
  case Builtin::BI__builtin_elementwise_add_sat:
  case Builtin::BI__builtin_elementwise_sub_sat: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Result;
    assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
    QualType Ty = E->getArg(0)->getType();
    if (auto *VecTy = Ty->getAs<VectorType>())
      Ty = VecTy->getElementType();
    bool IsSigned = Ty->isSignedIntegerType();
    unsigned Opc;
    if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
      Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
    else
      Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
    Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
    return RValue::get(Result);
  }
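  // For example, __builtin_elementwise_add_sat on a vector of signed chars
  // becomes "call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)";
  // the unsigned and subtracting variants select uadd.sat, ssub.sat or
  // usub.sat accordingly.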
  case Builtin::BI__builtin_elementwise_max: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Result;
    if (Op0->getType()->isIntOrIntVectorTy()) {
      QualType Ty = E->getArg(0)->getType();
      if (auto *VecTy = Ty->getAs<VectorType>())
        Ty = VecTy->getElementType();
      Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
                                                 ? llvm::Intrinsic::smax
                                                 : llvm::Intrinsic::umax,
                                             Op0, Op1, nullptr, "elt.max");
    } else
      Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_elementwise_min: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Result;
    if (Op0->getType()->isIntOrIntVectorTy()) {
      QualType Ty = E->getArg(0)->getType();
      if (auto *VecTy = Ty->getAs<VectorType>())
        Ty = VecTy->getElementType();
      Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
                                                 ? llvm::Intrinsic::smin
                                                 : llvm::Intrinsic::umin,
                                             Op0, Op1, nullptr, "elt.min");
    } else
      Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_reduce_max: {
    auto GetIntrinsicID = [](QualType QT) {
      if (auto *VecTy = QT->getAs<VectorType>())
        QT = VecTy->getElementType();
      if (QT->isSignedIntegerType())
        return llvm::Intrinsic::vector_reduce_smax;
      if (QT->isUnsignedIntegerType())
        return llvm::Intrinsic::vector_reduce_umax;
      assert(QT->isFloatingType() && "must have a float here");
      return llvm::Intrinsic::vector_reduce_fmax;
    };
    return RValue::get(emitUnaryBuiltin(
        *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
  }

  case Builtin::BI__builtin_reduce_min: {
    auto GetIntrinsicID = [](QualType QT) {
      if (auto *VecTy = QT->getAs<VectorType>())
        QT = VecTy->getElementType();
      if (QT->isSignedIntegerType())
        return llvm::Intrinsic::vector_reduce_smin;
      if (QT->isUnsignedIntegerType())
        return llvm::Intrinsic::vector_reduce_umin;
      assert(QT->isFloatingType() && "must have a float here");
      return llvm::Intrinsic::vector_reduce_fmin;
    };

    return RValue::get(emitUnaryBuiltin(
        *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
  }

  case Builtin::BI__builtin_reduce_add:
    return RValue::get(emitUnaryBuiltin(
        *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
  case Builtin::BI__builtin_reduce_mul:
    return RValue::get(emitUnaryBuiltin(
        *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
  case Builtin::BI__builtin_reduce_xor:
    return RValue::get(emitUnaryBuiltin(
        *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
  case Builtin::BI__builtin_reduce_or:
    return RValue::get(emitUnaryBuiltin(
        *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
  case Builtin::BI__builtin_reduce_and:
    return RValue::get(emitUnaryBuiltin(
        *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
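  // Illustrative lowering: __builtin_reduce_add(v) on a 4 x i32 vector is
  // "call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)"; the min/max forms
  // above pick the smin/umin/fmin (resp. smax/umax/fmax) flavour based on the
  // element type.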
  case Builtin::BI__builtin_matrix_transpose: {
    auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
    Value *MatValue = EmitScalarExpr(E->getArg(0));
    MatrixBuilder MB(Builder);
    Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
                                             MatrixTy->getNumColumns());
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_matrix_column_major_load: {
    MatrixBuilder MB(Builder);
    // Emit everything that isn't dependent on the first parameter type
    Value *Stride = EmitScalarExpr(E->getArg(3));
    const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
    auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
    assert(PtrTy && "arg0 must be of pointer type");
    bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();

    Address Src = EmitPointerWithAlignment(E->getArg(0));
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Value *Result = MB.CreateColumnMajorLoad(
        Src.getElementType(), Src.getPointer(),
        Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
        ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_matrix_column_major_store: {
    MatrixBuilder MB(Builder);
    Value *Matrix = EmitScalarExpr(E->getArg(0));
    Address Dst = EmitPointerWithAlignment(E->getArg(1));
    Value *Stride = EmitScalarExpr(E->getArg(2));

    const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
    auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
    assert(PtrTy && "arg1 must be of pointer type");
    bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();

    EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 0);
    Value *Result = MB.CreateColumnMajorStore(
        Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
        Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
    return RValue::get(Result);
  }
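  // These map onto MatrixBuilder's intrinsics: a transpose of a 4x3 float
  // matrix is expected to become roughly
  // "call <12 x float> @llvm.matrix.transpose.v12f32(<12 x float> %m, i32 4,
  //  i32 3)", and the column-major load/store intrinsics carry the stride
  // and volatility flags computed above.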
  case Builtin::BI__builtin_isinf_sign: {
    // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
    Value *Arg = EmitScalarExpr(E->getArg(0));
    Value *AbsArg = EmitFAbs(*this, Arg);
    Value *IsInf = Builder.CreateFCmpOEQ(
        AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
    Value *IsNeg = EmitSignBit(*this, Arg);

    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Zero = Constant::getNullValue(IntTy);
    Value *One = ConstantInt::get(IntTy, 1);
    Value *NegativeOne = ConstantInt::get(IntTy, -1);
    Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
    Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_flt_rounds: {
    Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_set_flt_rounds: {
    Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);

    Value *V = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall(F, V);
    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_fpclassify: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    Value *NormalResult =
        Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  // An alloca will always return a pointer to the alloca (stack) address
  // space. This address space need not be the same as the AST / Language
  // default (e.g. in C / C++ auto vars are in the generic address space). At
  // the AST level this is handled within CreateTempAlloca et al., but for the
  // builtin / dynamic alloca we have to handle it here. We use an explicit cast
  // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca_uninitialized:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    const TargetInfo &TI = getContext().getTargetInfo();
    // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
    const Align SuitableAlignmentInBytes =
        CGM.getContext()
            .toCharUnitsFromBits(TI.getSuitableAlign())
            .getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(SuitableAlignmentInBytes);
    if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
      initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
    LangAS AAS = getASTAllocaAddressSpace();
    LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
    if (AAS != EAS) {
      llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
      return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
                                                               EAS, Ty));
    }
    return RValue::get(AI);
  }

  case Builtin::BI__builtin_alloca_with_align_uninitialized:
  case Builtin::BI__builtin_alloca_with_align: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
    auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
    unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
    const Align AlignmentInBytes =
        CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(AlignmentInBytes);
    if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
      initializeAlloca(*this, AI, Size, AlignmentInBytes);
    LangAS AAS = getASTAllocaAddressSpace();
    LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
    if (AAS != EAS) {
      llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
      return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
                                                               EAS, Ty));
    }
    return RValue::get(AI);
  }
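  // E.g. __builtin_alloca_with_align(n, 64) becomes "alloca i8, i64 %n,
  // align 8" (the second argument is in bits), is zero- or pattern-filled
  // per -ftrivial-auto-var-init via initializeAlloca, and is
  // address-space-cast back to the language's pointer type when the target
  // allocates stack memory in a different address space.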
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
    return RValue::get(nullptr);
  }

  case Builtin::BIbcopy:
  case Builtin::BI__builtin_bcopy: {
    Address Src = EmitPointerWithAlignment(E->getArg(0));
    Address Dest = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 0);
    Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    if (BuiltinID == Builtin::BImempcpy ||
        BuiltinID == Builtin::BI__builtin_mempcpy)
      return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
                                                   Dest.getPointer(), SizeVal));
    else
      return RValue::get(Dest.getPointer());
  }

  case Builtin::BI__builtin_memcpy_inline: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    uint64_t Size =
        E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    Builder.CreateMemCpyInline(Dest, Src, Size);
    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_char_memchr:
    BuiltinID = Builtin::BI__builtin_memchr;
    break;

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
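  // E.g. __builtin___memcpy_chk(d, s, 4, 8) has a constant size (4) that is
  // known not to exceed the destination size (8), so it is emitted here as a
  // plain @llvm.memcpy of 4 bytes; otherwise we break out and fall through
  // to the ordinary library-call handling.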
  case Builtin::BI__builtin_objc_memmove_collectable: {
    Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
    Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  DestAddr, SrcAddr, SizeVal);
    return RValue::get(DestAddr.getPointer());
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    Builder.CreateMemMove(Dest, Src, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BI__builtin_memset_inline: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal =
        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
    uint64_t Size =
        E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSetInline(Dest, ByteVal, Size);
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer());
  }
  case Builtin::BI__builtin_wmemchr: {
    // The MSVC runtime library does not provide a definition of wmemchr, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
    Value *Str = EmitScalarExpr(E->getArg(0));
    Value *Chr = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
    BasicBlock *Next = createBasicBlock("wmemchr.next");
    BasicBlock *Exit = createBasicBlock("wmemchr.exit");
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpEq);

    EmitBlock(CmpEq);
    PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
    StrPhi->addIncoming(Str, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
        getContext().getTypeAlignInChars(getContext().WCharTy);
    Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
    Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
    Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
    Builder.CreateCondBr(StrEqChr, Exit, Next);

    EmitBlock(Next);
    Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
    StrPhi->addIncoming(NextStr, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
    Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
    Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
    Ret->addIncoming(FoundChr, CmpEq);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_wmemcmp: {
    // The MSVC runtime library does not provide a definition of wmemcmp, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);

    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
    BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
    BasicBlock *Next = createBasicBlock("wmemcmp.next");
    BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpGT);

    EmitBlock(CmpGT);
    PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
    DstPhi->addIncoming(Dst, Entry);
    PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
    SrcPhi->addIncoming(Src, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
        getContext().getTypeAlignInChars(getContext().WCharTy);
    Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
    Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
    Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
    Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);

    EmitBlock(CmpLT);
    Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
    Builder.CreateCondBr(DstLtSrc, Exit, Next);

    EmitBlock(Next);
    Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
    Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
    DstPhi->addIncoming(NextDst, Next);
    SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(IntTy, 4);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
    Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
    Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI_ReturnAddress: {
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Function *F =
        CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
                                                    : Intrinsic::eh_return_i64);
    Builder.CreateCall(F, {Int, Ptr});
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    Builder.CreateCall(F);
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Address Buf = EmitPointerWithAlignment(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr = Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
        ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr = Builder.CreateStackSave();
    assert(Buf.getPointer()->getType() == StackAddr->getType());

    Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_launder: {
    const Expr *Arg = E->getArg(0);
    QualType ArgTy = Arg->getType()->getPointeeType();
    Value *Ptr = EmitScalarExpr(Arg);
    if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
      Ptr = Builder.CreateLaunderInvariantGroup(Ptr);

    return RValue::get(Ptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
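  // Each width-suffixed __sync builtin above becomes a sequentially-consistent
  // atomicrmw, e.g. __sync_fetch_and_add_4(p, v) is
  // "atomicrmw add ptr %p, i32 %v seq_cst"; the *_and_fetch forms below
  // additionally apply the operation to the returned value.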
4262 // Clang extensions: not overloaded yet.
4263 case Builtin::BI__sync_fetch_and_min
:
4264 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min
, E
);
4265 case Builtin::BI__sync_fetch_and_max
:
4266 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max
, E
);
4267 case Builtin::BI__sync_fetch_and_umin
:
4268 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin
, E
);
4269 case Builtin::BI__sync_fetch_and_umax
:
4270 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax
, E
);
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);
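
  // Illustrative note: the *_and_fetch forms return the *new* value, so
  // EmitBinaryAtomicPost roughly emits the atomicrmw and then re-applies the
  // given Instruction opcode to the returned old value; for nand the trailing
  // 'true' requests a final bitwise inversion, since LLVM's nand is ~(x & y).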
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = CheckAtomicAlignment(*this, E);
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy =
        llvm::IntegerType::get(getLLVMContext(), StoreSize.getQuantity() * 8);
    llvm::StoreInst *Store =
        Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
                                   StoreSize);
    Store->setAtomic(llvm::AtomicOrdering::Release);
    return RValue::get(nullptr);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
    return RValue::get(nullptr);
  }
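
  // Illustrative note: __sync_synchronize() therefore lowers to the single
  // instruction
  //   fence seq_cst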
  case Builtin::BI__builtin_nontemporal_load:
    return RValue::get(EmitNontemporalLoad(*this, E));
  case Builtin::BI__builtin_nontemporal_store:
    return RValue::get(EmitNontemporalStore(*this, E));
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, CGCallee::forDirect(Func),
                    ReturnValueSlot(), Args);
  }
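
  // Illustrative note: e.g. __atomic_is_lock_free(8, p) is emitted as a call
  // to the libatomic helper declared above, roughly
  //   call i1 @__atomic_is_lock_free(i64 8, ptr %p)
  // (exact parameter types follow the target's size_t and pointer ABI).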
  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Monotonic);
        break;
      case 1: // memory_order_consume
      case 2: // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Acquire);
        break;
      case 3: // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Release);
        break;
      case 4: // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::AcquireRelease);
        break;
      case 5: // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(
            llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
            llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
        createBasicBlock("monotonic", CurFn),
        createBasicBlock("acquire", CurFn),
        createBasicBlock("release", CurFn),
        createBasicBlock("acqrel", CurFn),
        createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
        llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
        llvm::AtomicOrdering::SequentiallyConsistent};

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }
  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Address Ptr = EmitPointerWithAlignment(E->getArg(0));
    Ptr = Ptr.withElementType(Int8Ty);
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::AtomicOrdering::Monotonic);
        break;
      case 3: // memory_order_release
        Store->setOrdering(llvm::AtomicOrdering::Release);
        break;
      case 5: // memory_order_seq_cst
        Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
        createBasicBlock("monotonic", CurFn),
        createBasicBlock("release", CurFn),
        createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
        llvm::AtomicOrdering::SequentiallyConsistent};

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SyncScope::ID SSID;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      SSID = llvm::SyncScope::SingleThread;
    else
      SSID = llvm::SyncScope::System;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1: // memory_order_consume
      case 2: // memory_order_acquire
        Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
        break;
      case 3: // memory_order_release
        Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
        break;
      case 4: // memory_order_acq_rel
        Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
        break;
      case 5: // memory_order_seq_cst
        Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }
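
  // Illustrative note: e.g. __atomic_thread_fence(__ATOMIC_ACQUIRE) lowers to
  //   fence acquire
  // while the signal-fence variants use the single-thread sync scope:
  //   fence syncscope("singlethread") acquire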
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    return RValue::get(
        Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
                           ConvertType(E->getType())));
  }
  case Builtin::BI__warn_memset_zero_len:
    return RValue::getIgnored();
  case Builtin::BI__annotation: {
    // Re-encode each wide string to UTF8 and make an MDString.
    SmallVector<Metadata *, 1> Strings;
    for (const Expr *Arg : E->arguments()) {
      const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
      assert(Str->getCharByteWidth() == 2);
      StringRef WideBytes = Str->getBytes();
      std::string StrUtf8;
      if (!convertUTF16ToUTF8String(
              ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
        CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
        continue;
      }
      Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
    }

    // Build an MDTuple of MDStrings and emit the intrinsic call.
    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
    MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
    Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
    return RValue::getIgnored();
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::annotation,
                         {AnnVal->getType(), CGM.ConstGlobalsPtrTy});

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially casted, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(
        EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    Builder.CreateStore(CarryOut, CarryOutPtr);
    return RValue::get(Sum2);
  }
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow: {
    const clang::Expr *LeftArg = E->getArg(0);
    const clang::Expr *RightArg = E->getArg(1);
    const clang::Expr *ResultArg = E->getArg(2);

    clang::QualType ResultQTy =
        ResultArg->getType()->castAs<PointerType>()->getPointeeType();

    WidthAndSignedness LeftInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
    WidthAndSignedness RightInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
    WidthAndSignedness ResultInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);

    // Handle mixed-sign multiplication as a special case, because adding
    // runtime or backend support for our generic irgen would be too expensive.
    if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
      return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
                                          RightInfo, ResultArg, ResultQTy,
                                          ResultInfo);

    if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
                                              ResultInfo))
      return EmitCheckedUnsignedMultiplySignedResult(
          *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
          ResultInfo);

    WidthAndSignedness EncompassingInfo =
        EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});

    llvm::Type *EncompassingLLVMTy =
        llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);

    llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);

    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default:
      llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_add_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::sadd_with_overflow
                        : llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_sub_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::ssub_with_overflow
                        : llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_mul_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::smul_with_overflow
                        : llvm::Intrinsic::umul_with_overflow;
      break;
    }

    llvm::Value *Left = EmitScalarExpr(LeftArg);
    llvm::Value *Right = EmitScalarExpr(RightArg);
    Address ResultPtr = EmitPointerWithAlignment(ResultArg);

    // Extend each operand to the encompassing type.
    Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
    Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);

    // Perform the operation on the extended values.
    llvm::Value *Overflow, *Result;
    Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);

    if (EncompassingInfo.Width > ResultInfo.Width) {
      // The encompassing type is wider than the result type, so we need to
      // truncate it.
      llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);

      // To see if the truncation caused an overflow, we will extend
      // the result and then compare it to the original result.
      llvm::Value *ResultTruncExt = Builder.CreateIntCast(
          ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
      llvm::Value *TruncationOverflow =
          Builder.CreateICmpNE(Result, ResultTruncExt);

      Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
      Result = ResultTrunc;
    }

    // Finally, store the result using the pointer.
    bool isVolatile =
        ResultArg->getType()->getPointeeType().isVolatileQualified();
    Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);

    return RValue::get(Overflow);
  }
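
  // Illustrative note (operand types are a hypothetical example): for
  //   __builtin_add_overflow(int a, unsigned b, long *res)
  // the operands are widened to an encompassing signed type (i64 under LP64
  // assumptions), the check uses @llvm.sadd.with.overflow.i64, and if the
  // result type were narrower the truncation check above would OR an extra
  // overflow bit into the returned flag.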
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {

    // We translate all of these builtins directly to the relevant llvm IR node.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    Builder.CreateStore(Sum, SumOutPtr);

    return RValue::get(Carry);
  }
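
  // Illustrative note: e.g. __builtin_sadd_overflow(a, b, &r) maps directly to
  //   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  // with the first element stored through &r and the i1 carry bit returned.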
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
  case Builtin::BI__builtin_function_start:
    return RValue::get(CGM.GetFunctionStart(
        E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
  case Builtin::BI__builtin_operator_new:
    return EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
  case Builtin::BI__builtin_operator_delete:
    EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
    return RValue::get(nullptr);

  case Builtin::BI__builtin_is_aligned:
    return EmitBuiltinIsAligned(E);
  case Builtin::BI__builtin_align_up:
    return EmitBuiltinAlignTo(E, true);
  case Builtin::BI__builtin_align_down:
    return EmitBuiltinAlignTo(E, false);

  case Builtin::BI__noop:
    // __noop always evaluates to an integer literal zero.
    return RValue::get(ConstantInt::get(IntTy, 0));
  case Builtin::BI__builtin_call_with_static_chain: {
    const CallExpr *Call = cast<CallExpr>(E->getArg(0));
    const Expr *Chain = E->getArg(1);
    return EmitCall(Call->getCallee()->getType(),
                    EmitCallee(Call->getCallee()), Call, ReturnValue,
                    EmitScalarExpr(Chain));
  }
  case Builtin::BI_InterlockedExchange8:
  case Builtin::BI_InterlockedExchange16:
  case Builtin::BI_InterlockedExchange:
  case Builtin::BI_InterlockedExchangePointer:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
  case Builtin::BI_InterlockedCompareExchangePointer:
  case Builtin::BI_InterlockedCompareExchangePointer_nf: {
    llvm::Type *RTy;
    llvm::IntegerType *IntType = IntegerType::get(
        getLLVMContext(), getContext().getTypeSize(E->getType()));

    llvm::Value *Destination = EmitScalarExpr(E->getArg(0));

    llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
    RTy = Exchange->getType();
    Exchange = Builder.CreatePtrToInt(Exchange, IntType);

    llvm::Value *Comparand =
        Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);

    auto Ordering =
        BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
        AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;

    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              Ordering, Ordering);
    Result->setVolatile(true);

    return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
                                                                         0),
                                              RTy));
  }
  case Builtin::BI_InterlockedCompareExchange8:
  case Builtin::BI_InterlockedCompareExchange16:
  case Builtin::BI_InterlockedCompareExchange:
  case Builtin::BI_InterlockedCompareExchange64:
    return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
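
  // Illustrative note: _InterlockedCompareExchangePointer is emitted as a
  // volatile seq_cst cmpxchg on a pointer-sized integer (monotonic for the
  // "_nf" no-fence flavor), with the old value converted back to a pointer
  // via inttoptr.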
  case Builtin::BI_InterlockedIncrement16:
  case Builtin::BI_InterlockedIncrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
  case Builtin::BI_InterlockedDecrement16:
  case Builtin::BI_InterlockedDecrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
  case Builtin::BI_InterlockedAnd8:
  case Builtin::BI_InterlockedAnd16:
  case Builtin::BI_InterlockedAnd:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
  case Builtin::BI_InterlockedExchangeAdd8:
  case Builtin::BI_InterlockedExchangeAdd16:
  case Builtin::BI_InterlockedExchangeAdd:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
  case Builtin::BI_InterlockedExchangeSub8:
  case Builtin::BI_InterlockedExchangeSub16:
  case Builtin::BI_InterlockedExchangeSub:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
  case Builtin::BI_InterlockedOr8:
  case Builtin::BI_InterlockedOr16:
  case Builtin::BI_InterlockedOr:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
  case Builtin::BI_InterlockedXor8:
  case Builtin::BI_InterlockedXor16:
  case Builtin::BI_InterlockedXor:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));

  case Builtin::BI_bittest64:
  case Builtin::BI_bittest:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandcomplement:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandreset:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_bittestandset:
  case Builtin::BI_interlockedbittestandreset:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
  case Builtin::BI_interlockedbittestandset:
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));

    // These builtins exist to emit regular volatile loads and stores not
    // affected by the -fms-volatile setting.
  case Builtin::BI__iso_volatile_load8:
  case Builtin::BI__iso_volatile_load16:
  case Builtin::BI__iso_volatile_load32:
  case Builtin::BI__iso_volatile_load64:
    return RValue::get(EmitISOVolatileLoad(*this, E));
  case Builtin::BI__iso_volatile_store8:
  case Builtin::BI__iso_volatile_store16:
  case Builtin::BI__iso_volatile_store32:
  case Builtin::BI__iso_volatile_store64:
    return RValue::get(EmitISOVolatileStore(*this, E));
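
  // Illustrative note: e.g. __iso_volatile_load32(p) always produces
  //   %v = load volatile i32, ptr %p
  // regardless of the -fms-volatile setting, which is the whole point of
  // these builtins.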
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    return RValue::get(EmitSEHExceptionCode());
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    return RValue::get(EmitSEHExceptionInfo());
  case Builtin::BI__abnormal_termination:
  case Builtin::BI_abnormal_termination:
    return RValue::get(EmitSEHAbnormalTermination());
  case Builtin::BI_setjmpex:
    if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
        E->getArg(0)->getType()->isPointerType())
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
    break;
  case Builtin::BI_setjmp:
    if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
        E->getArg(0)->getType()->isPointerType()) {
      if (getTarget().getTriple().getArch() == llvm::Triple::x86)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
      else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
    }
    break;

  // C++ std:: builtins.
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIforward:
  case Builtin::BIforward_like:
  case Builtin::BIas_const:
    return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
  case Builtin::BI__GetExceptionInfo: {
    if (llvm::GlobalVariable *GV =
            CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
      return RValue::get(GV);
    break;
  }

  case Builtin::BI__fastfail:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));

  case Builtin::BI__builtin_coro_id:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
  case Builtin::BI__builtin_coro_promise:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
  case Builtin::BI__builtin_coro_resume:
    EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
    return RValue::get(nullptr);
  case Builtin::BI__builtin_coro_frame:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
  case Builtin::BI__builtin_coro_noop:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
  case Builtin::BI__builtin_coro_free:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
  case Builtin::BI__builtin_coro_destroy:
    EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
    return RValue::get(nullptr);
  case Builtin::BI__builtin_coro_done:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
  case Builtin::BI__builtin_coro_alloc:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
  case Builtin::BI__builtin_coro_begin:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
  case Builtin::BI__builtin_coro_end:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
  case Builtin::BI__builtin_coro_suspend:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
  case Builtin::BI__builtin_coro_size:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
  case Builtin::BI__builtin_coro_align:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
  // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe: {
    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Type of the generic packet parameter.
    unsigned GenericAS =
        getContext().getTargetAddressSpace(LangAS::opencl_generic);
    llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);

    // Testing which overloaded version we should generate the call for.
    if (2U == E->getNumArgs()) {
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
                                                             : "__write_pipe_2";
      // Creating a generic function type to be able to call with any builtin
      // or user defined type.
      llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
      return RValue::get(
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                          {Arg0, BCast, PacketSize, PacketAlign}));
    } else {
      assert(4 == E->getNumArgs() &&
             "Illegal number of parameters to pipe function");
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
                                                             : "__write_pipe_4";

      llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
                              Int32Ty, Int32Ty};
      Value *Arg2 = EmitScalarExpr(E->getArg(2)),
            *Arg3 = EmitScalarExpr(E->getArg(3));
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
      // We know the third argument is an integer type, but we may need to cast
      // it to i32.
      if (Arg2->getType() != Int32Ty)
        Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
      return RValue::get(
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                          {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
    }
  }
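
  // Illustrative note: a two-argument read_pipe(p, &v) call therefore becomes
  // roughly a call to
  //   i32 @__read_pipe_2(pipe, generic void*, i32 size, i32 align)
  // where size/align describe the pipe's packet type; the reserved-id form
  // with four arguments calls __read_pipe_4 instead.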
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
  // functions
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe: {
    // Composing the mangled name for the function.
    const char *Name;
    if (BuiltinID == Builtin::BIreserve_read_pipe)
      Name = "__reserve_read_pipe";
    else if (BuiltinID == Builtin::BIreserve_write_pipe)
      Name = "__reserve_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
      Name = "__work_group_reserve_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
      Name = "__work_group_reserve_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
      Name = "__sub_group_reserve_read_pipe";
    else
      Name = "__sub_group_reserve_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
    // We know the second argument is an integer type, but we may need to cast
    // it to i32.
    if (Arg1->getType() != Int32Ty)
      Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
  // functions
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe: {
    const char *Name;
    if (BuiltinID == Builtin::BIcommit_read_pipe)
      Name = "__commit_read_pipe";
    else if (BuiltinID == Builtin::BIcommit_write_pipe)
      Name = "__commit_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
      Name = "__work_group_commit_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
      Name = "__work_group_commit_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
      Name = "__sub_group_commit_read_pipe";
    else
      Name = "__sub_group_commit_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
                                llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets: {
    const char *BaseName;
    const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
    if (BuiltinID == Builtin::BIget_pipe_num_packets)
      BaseName = "__get_pipe_num_packets";
    else
      BaseName = "__get_pipe_max_packets";
    std::string Name = std::string(BaseName) +
                       std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");

    // Building the generic function prototype.
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, PacketSize, PacketAlign}));
  }

  // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private: {
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto NewArgT = llvm::PointerType::get(
        getLLVMContext(),
        CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto NewRetT = llvm::PointerType::get(
        getLLVMContext(),
        CGM.getContext().getTargetAddressSpace(
            E->getType()->getPointeeType().getAddressSpace()));
    auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
    llvm::Value *NewArg;
    if (Arg0->getType()->getPointerAddressSpace() !=
        NewArgT->getPointerAddressSpace())
      NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
    else
      NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
    auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
    auto NewCall =
        EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
    return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
                                                      ConvertType(E->getType())));
  }
  // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
  // It contains four different overload formats specified in Table 6.13.17.1.
  case Builtin::BIenqueue_kernel: {
    StringRef Name; // Generated function call name
    unsigned NumArgs = E->getNumArgs();

    llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
    llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));

    llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
    llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
    llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
    llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();

    if (NumArgs == 4) {
      // The most basic form of the call with parameters:
      // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
      Name = "__enqueue_kernel_basic";
      llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
                              GenericVoidPtrTy};
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);

      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
      llvm::Value *Block =
          Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);

      AttrBuilder B(Builder.getContext());
      B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
      llvm::AttributeList ByValAttrSet =
          llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);

      auto RTCall =
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
                          {Queue, Flags, Range, Kernel, Block});
      RTCall->setAttributes(ByValAttrSet);
      return RValue::get(RTCall);
    }
    assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");

    // Create a temporary array to hold the sizes of local pointer arguments
    // for the block. \p First is the position of the first size argument.
    auto CreateArrayForSizeVar = [=](unsigned First)
        -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
      llvm::APInt ArraySize(32, NumArgs - First);
      QualType SizeArrayTy = getContext().getConstantArrayType(
          getContext().getSizeType(), ArraySize, nullptr,
          ArraySizeModifier::Normal,
          /*IndexTypeQuals=*/0);
      auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
      llvm::Value *TmpPtr = Tmp.getPointer();
      llvm::Value *TmpSize = EmitLifetimeStart(
          CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
      llvm::Value *ElemPtr;
      // Each of the following arguments specifies the size of the
      // corresponding argument passed to the enqueued block.
      auto *Zero = llvm::ConstantInt::get(IntTy, 0);
      for (unsigned I = First; I < NumArgs; ++I) {
        auto *Index = llvm::ConstantInt::get(IntTy, I - First);
        auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
                                      {Zero, Index});
        if (I == First)
          ElemPtr = GEP;
        auto *V =
            Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
        Builder.CreateAlignedStore(
            V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
      }
      return std::tie(ElemPtr, TmpSize, TmpPtr);
    };

    // Could have events and/or varargs.
    if (E->getArg(3)->getType()->isBlockPointerType()) {
      // No events passed, but has variadic arguments.
      Name = "__enqueue_kernel_varargs";
      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
      auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);

      // Create a vector of the arguments, as well as a constant value to
      // express to the runtime the number of variadic arguments.
      llvm::Value *const Args[] = {Queue, Flags,
                                   Range, Kernel,
                                   Block, ConstantInt::get(IntTy, NumArgs - 4),
                                   ElemPtr};
      llvm::Type *const ArgTys[] = {
          QueueTy,          IntTy, RangeTy,           GenericVoidPtrTy,
          GenericVoidPtrTy, IntTy, ElemPtr->getType()};

      llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
      auto Call = RValue::get(
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
    // Any calls now have event arguments passed.
    if (NumArgs >= 7) {
      llvm::PointerType *PtrTy = llvm::PointerType::get(
          CGM.getLLVMContext(),
          CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));

      llvm::Value *NumEvents =
          Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);

      // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
      // to be a null pointer constant (including `0` literal), we can take it
      // into account and emit null pointer directly.
      llvm::Value *EventWaitList = nullptr;
      if (E->getArg(4)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
      } else {
        EventWaitList = E->getArg(4)->getType()->isArrayType()
                        ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
                        : EmitScalarExpr(E->getArg(4));
        // Convert to generic address space.
        EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
      }
      llvm::Value *EventRet = nullptr;
      if (E->getArg(5)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventRet = llvm::ConstantPointerNull::get(PtrTy);
      } else {
        EventRet =
            Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
      }

      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
      llvm::Value *Block =
          Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);

      std::vector<llvm::Type *> ArgTys = {
          QueueTy, Int32Ty, RangeTy,          Int32Ty,
          PtrTy,   PtrTy,   GenericVoidPtrTy, GenericVoidPtrTy};

      std::vector<llvm::Value *> Args = {Queue,     Flags,         Range,
                                         NumEvents, EventWaitList, EventRet,
                                         Kernel,    Block};

      if (NumArgs == 7) {
        // Has events but no variadics.
        Name = "__enqueue_kernel_basic_events";
        llvm::FunctionType *FTy = llvm::FunctionType::get(
            Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
        return RValue::get(
            EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                            llvm::ArrayRef<llvm::Value *>(Args)));
      }
      // Has event info and variadics
      // Pass the number of variadics to the runtime function too.
      Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
      ArgTys.push_back(Int32Ty);
      Name = "__enqueue_kernel_events_varargs";

      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
      Args.push_back(ElemPtr);
      ArgTys.push_back(ElemPtr->getType());

      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      auto Call =
          RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                      llvm::ArrayRef<llvm::Value *>(Args)));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
    llvm_unreachable("Unexpected enqueue_kernel signature");
  }
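
  // Illustrative note: the overloads above dispatch on argument shape:
  // four arguments select __enqueue_kernel_basic, a block in argument 3
  // selects the *_varargs form with an on-stack array of local-memory sizes,
  // and seven or more arguments select the *_events forms that also pass the
  // wait-list and return-event pointers.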
  // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
  // parameter.
  case Builtin::BIget_kernel_work_group_size: {
    llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel =
        Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(EmitRuntimeCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
                                    false),
            "__get_kernel_work_group_size_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
    llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel =
        Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(EmitRuntimeCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
                                    false),
            "__get_kernel_preferred_work_group_size_multiple_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
    llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
    llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
    Value *Kernel =
        Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
    Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    const char *Name =
        BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
            ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
            : "__get_kernel_sub_group_count_for_ndrange_impl";
    return RValue::get(EmitRuntimeCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(
                IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
                false),
            Name),
        {NDRange, Kernel, Block}));
  }

  case Builtin::BI__builtin_store_half:
  case Builtin::BI__builtin_store_halff: {
    Value *Val = EmitScalarExpr(E->getArg(0));
    Address Address = EmitPointerWithAlignment(E->getArg(1));
    Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
    Builder.CreateStore(HalfVal, Address);
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_load_half: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
  }
  case Builtin::BI__builtin_load_halff: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
  }
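
  // Illustrative note: __builtin_store_halff(f, p) is just
  //   %h = fptrunc float %f to half  ;  store half %h, ptr %p
  // and __builtin_load_half(p) widens the loaded half back with fpext to
  // double (to float for the load_halff variant).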
  case Builtin::BIprintf:
    if (getTarget().getTriple().isNVPTX() ||
        getTarget().getTriple().isAMDGCN()) {
      if (getLangOpts().OpenMPIsTargetDevice)
        return EmitOpenMPDevicePrintfCallExpr(E);
      if (getTarget().getTriple().isNVPTX())
        return EmitNVPTXDevicePrintfCallExpr(E);
      if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
        return EmitAMDGPUDevicePrintfCallExpr(E);
    }

    break;
  case Builtin::BI__builtin_canonicalize:
  case Builtin::BI__builtin_canonicalizef:
  case Builtin::BI__builtin_canonicalizef16:
  case Builtin::BI__builtin_canonicalizel:
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));

  case Builtin::BI__builtin_thread_pointer: {
    if (!getContext().getTargetInfo().isTLSSupported())
      CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
    // Fall through - it's already mapped to the intrinsic by ClangBuiltin.
    break;
  }
  case Builtin::BI__builtin_os_log_format:
    return emitBuiltinOSLogFormat(*E);

  case Builtin::BI__xray_customevent: {
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Custom))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = E->getArg(0);
    auto Arg0Val = EmitScalarExpr(Arg0);
    auto Arg0Ty = Arg0->getType();
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0Val->getType()) {
      if (Arg0Ty->isArrayType())
        Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
      else
        Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
    }
    auto Arg1 = EmitScalarExpr(E->getArg(1));
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1->getType())
      Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
    return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
  }

  case Builtin::BI__xray_typedevent: {
    // TODO: There should be a way to always emit events even if the current
    // function is not instrumented. Losing events in a stream can cripple
    // a trace.
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Typed))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0->getType())
      Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
    auto Arg1 = E->getArg(1);
    auto Arg1Val = EmitScalarExpr(Arg1);
    auto Arg1Ty = Arg1->getType();
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1Val->getType()) {
      if (Arg1Ty->isArrayType())
        Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
      else
        Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
    }
    auto Arg2 = EmitScalarExpr(E->getArg(2));
    auto PTy2 = FTy->getParamType(2);
    if (PTy2 != Arg2->getType())
      Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
    return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
  }
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_ms_va_end:
    return RValue::get(
        EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID == Builtin::BI__builtin_ms_va_start));
  case Builtin::BI__builtin_ms_va_copy: {
    // Lower this manually. We can't reliably determine whether or not any
    // given va_copy() is for a Win64 va_list from the calling convention
    // alone, because it's legal to do this from a System V ABI function.
    // With opaque pointer types, we won't have enough information in LLVM
    // IR to determine this from the argument types, either. Best to do it
    // now, while we have enough information.
    Address DestAddr = EmitMSVAListRef(E->getArg(0));
    Address SrcAddr = EmitMSVAListRef(E->getArg(1));

    llvm::Type *BPP = Int8PtrPtrTy;

    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
                       Int8PtrTy, DestAddr.getAlignment());
    SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
                      Int8PtrTy, SrcAddr.getAlignment());

    Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
    return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
  }
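
  // Illustrative note: on Win64 a va_list is a single char*, so the manual
  // lowering above is simply "load the source slot, store it into the
  // destination slot"; no va_copy intrinsic call is emitted.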
  case Builtin::BI__builtin_get_device_side_mangled_name: {
    auto Name = CGM.getCUDARuntime().getDeviceSideName(
        cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
    auto Str = CGM.GetAddrOfConstantCString(Name, "");
    llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
                               llvm::ConstantInt::get(SizeTy, 0)};
    auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
                                                     Str.getPointer(), Zeros);
    return RValue::get(Ptr);
  }
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));

  // Check that a call to a target specific builtin has the correct target
  // features.
  // This is down here to avoid non-target specific builtins, however, if
  // generic builtins start to require generic target features then we
  // can move this up to the beginning of the function.
  checkTargetFeatures(E, FD);

  if (unsigned VectorWidth =
          getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
    LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);

  // See if we have a target specific intrinsic.
  StringRef Name = getContext().BuiltinInfo.getName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  StringRef Prefix =
      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
  if (!Prefix.empty()) {
    IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
    // NOTE we don't need to perform a compatibility flag check here since the
    // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
    // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
    if (IntrinsicID == Intrinsic::not_intrinsic)
      IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
  }

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value *, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        ArgValue = llvm::ConstantInt::get(
            getLLVMContext(),
            *E->getArg(i)->getIntegerConstantExpr(getContext()));
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        // XXX - vector of pointers?
        if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
          if (PtrTy->getAddressSpace() !=
              ArgValue->getType()->getPointerAddressSpace()) {
            ArgValue = Builder.CreateAddrSpaceCast(
                ArgValue, llvm::PointerType::get(getLLVMContext(),
                                                 PtrTy->getAddressSpace()));
          }
        }

        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        // Cast vector type (e.g., v256i32) to x86_amx, this only happen
        // in amx intrinsics.
        if (PTy->isX86_AMXTy())
          ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
                                             {ArgValue->getType()}, {ArgValue});
        else
          ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      // XXX - vector of pointers?
      if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
        if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
          V = Builder.CreateAddrSpaceCast(
              V, llvm::PointerType::get(getLLVMContext(),
                                        PtrTy->getAddressSpace()));
        }
      }

      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      // Cast x86_amx to vector type (e.g., v256i32), this only happen
5771 // in amx intrinsics.
5772 if (V
->getType()->isX86_AMXTy())
5773 V
= Builder
.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector
, {RetTy
},
5776 V
= Builder
.CreateBitCast(V
, RetTy
);
5779 if (RetTy
->isVoidTy())
5780 return RValue::get(nullptr);
5782 return RValue::get(V
);
5785 // Some target-specific builtins can have aggregate return values, e.g.
5786 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5787 // ReturnValue to be non-null, so that the target-specific emission code can
5788 // always just emit into it.
5789 TypeEvaluationKind EvalKind
= getEvaluationKind(E
->getType());
5790 if (EvalKind
== TEK_Aggregate
&& ReturnValue
.isNull()) {
5791 Address DestPtr
= CreateMemTemp(E
->getType(), "agg.tmp");
5792 ReturnValue
= ReturnValueSlot(DestPtr
, false);
5795 // Now see if we can emit a target-specific builtin.
5796 if (Value
*V
= EmitTargetBuiltinExpr(BuiltinID
, E
, ReturnValue
)) {
5799 if (V
->getType()->isVoidTy())
5800 return RValue::get(nullptr);
5801 return RValue::get(V
);
5803 return RValue::getAggregate(ReturnValue
.getValue(),
5804 ReturnValue
.isVolatile());
5806 llvm_unreachable("No current target builtin returns complex");
5808 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5811 if (getLangOpts().HIPStdPar
&& getLangOpts().CUDAIsDevice
)
5812 return EmitHipStdParUnsupportedBuiltin(this, FD
);
5814 ErrorUnsupported(E
, "builtin function");
5816 // Unknown builtin, for now just dump it out and return undef.
5817 return GetUndefRValue(E
->getType());
5820 static Value
*EmitTargetArchBuiltinExpr(CodeGenFunction
*CGF
,
5821 unsigned BuiltinID
, const CallExpr
*E
,
5822 ReturnValueSlot ReturnValue
,
5823 llvm::Triple::ArchType Arch
) {
5824 // When compiling in HipStdPar mode we have to be conservative in rejecting
5825 // target specific features in the FE, and defer the possible error to the
5826 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
5827 // referenced by an accelerator executable function, we emit an error.
5828 // Returning nullptr here leads to the builtin being handled in
5829 // EmitStdParUnsupportedBuiltin.
5830 if (CGF
->getLangOpts().HIPStdPar
&& CGF
->getLangOpts().CUDAIsDevice
&&
5831 Arch
!= CGF
->getTarget().getTriple().getArch())
5835 case llvm::Triple::arm
:
5836 case llvm::Triple::armeb
:
5837 case llvm::Triple::thumb
:
5838 case llvm::Triple::thumbeb
:
5839 return CGF
->EmitARMBuiltinExpr(BuiltinID
, E
, ReturnValue
, Arch
);
5840 case llvm::Triple::aarch64
:
5841 case llvm::Triple::aarch64_32
:
5842 case llvm::Triple::aarch64_be
:
5843 return CGF
->EmitAArch64BuiltinExpr(BuiltinID
, E
, Arch
);
5844 case llvm::Triple::bpfeb
:
5845 case llvm::Triple::bpfel
:
5846 return CGF
->EmitBPFBuiltinExpr(BuiltinID
, E
);
5847 case llvm::Triple::x86
:
5848 case llvm::Triple::x86_64
:
5849 return CGF
->EmitX86BuiltinExpr(BuiltinID
, E
);
5850 case llvm::Triple::ppc
:
5851 case llvm::Triple::ppcle
:
5852 case llvm::Triple::ppc64
:
5853 case llvm::Triple::ppc64le
:
5854 return CGF
->EmitPPCBuiltinExpr(BuiltinID
, E
);
5855 case llvm::Triple::r600
:
5856 case llvm::Triple::amdgcn
:
5857 return CGF
->EmitAMDGPUBuiltinExpr(BuiltinID
, E
);
5858 case llvm::Triple::systemz
:
5859 return CGF
->EmitSystemZBuiltinExpr(BuiltinID
, E
);
5860 case llvm::Triple::nvptx
:
5861 case llvm::Triple::nvptx64
:
5862 return CGF
->EmitNVPTXBuiltinExpr(BuiltinID
, E
);
5863 case llvm::Triple::wasm32
:
5864 case llvm::Triple::wasm64
:
5865 return CGF
->EmitWebAssemblyBuiltinExpr(BuiltinID
, E
);
5866 case llvm::Triple::hexagon
:
5867 return CGF
->EmitHexagonBuiltinExpr(BuiltinID
, E
);
5868 case llvm::Triple::riscv32
:
5869 case llvm::Triple::riscv64
:
5870 return CGF
->EmitRISCVBuiltinExpr(BuiltinID
, E
, ReturnValue
);
5876 Value
*CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID
,
5878 ReturnValueSlot ReturnValue
) {
5879 if (getContext().BuiltinInfo
.isAuxBuiltinID(BuiltinID
)) {
5880 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5881 return EmitTargetArchBuiltinExpr(
5882 this, getContext().BuiltinInfo
.getAuxBuiltinID(BuiltinID
), E
,
5883 ReturnValue
, getContext().getAuxTargetInfo()->getTriple().getArch());
5886 return EmitTargetArchBuiltinExpr(this, BuiltinID
, E
, ReturnValue
,
5887 getTarget().getTriple().getArch());
5890 static llvm::FixedVectorType
*GetNeonType(CodeGenFunction
*CGF
,
5891 NeonTypeFlags TypeFlags
,
5892 bool HasLegalHalfType
= true,
5894 bool AllowBFloatArgsAndRet
= true) {
5895 int IsQuad
= TypeFlags
.isQuad();
5896 switch (TypeFlags
.getEltType()) {
5897 case NeonTypeFlags::Int8
:
5898 case NeonTypeFlags::Poly8
:
5899 return llvm::FixedVectorType::get(CGF
->Int8Ty
, V1Ty
? 1 : (8 << IsQuad
));
5900 case NeonTypeFlags::Int16
:
5901 case NeonTypeFlags::Poly16
:
5902 return llvm::FixedVectorType::get(CGF
->Int16Ty
, V1Ty
? 1 : (4 << IsQuad
));
5903 case NeonTypeFlags::BFloat16
:
5904 if (AllowBFloatArgsAndRet
)
5905 return llvm::FixedVectorType::get(CGF
->BFloatTy
, V1Ty
? 1 : (4 << IsQuad
));
5907 return llvm::FixedVectorType::get(CGF
->Int16Ty
, V1Ty
? 1 : (4 << IsQuad
));
5908 case NeonTypeFlags::Float16
:
5909 if (HasLegalHalfType
)
5910 return llvm::FixedVectorType::get(CGF
->HalfTy
, V1Ty
? 1 : (4 << IsQuad
));
5912 return llvm::FixedVectorType::get(CGF
->Int16Ty
, V1Ty
? 1 : (4 << IsQuad
));
5913 case NeonTypeFlags::Int32
:
5914 return llvm::FixedVectorType::get(CGF
->Int32Ty
, V1Ty
? 1 : (2 << IsQuad
));
5915 case NeonTypeFlags::Int64
:
5916 case NeonTypeFlags::Poly64
:
5917 return llvm::FixedVectorType::get(CGF
->Int64Ty
, V1Ty
? 1 : (1 << IsQuad
));
5918 case NeonTypeFlags::Poly128
:
5919 // FIXME: i128 and f128 doesn't get fully support in Clang and llvm.
5920 // There is a lot of i128 and f128 API missing.
5921 // so we use v16i8 to represent poly128 and get pattern matched.
5922 return llvm::FixedVectorType::get(CGF
->Int8Ty
, 16);
5923 case NeonTypeFlags::Float32
:
5924 return llvm::FixedVectorType::get(CGF
->FloatTy
, V1Ty
? 1 : (2 << IsQuad
));
5925 case NeonTypeFlags::Float64
:
5926 return llvm::FixedVectorType::get(CGF
->DoubleTy
, V1Ty
? 1 : (1 << IsQuad
));
5928 llvm_unreachable("Unknown vector element type!");
5931 static llvm::VectorType
*GetFloatNeonType(CodeGenFunction
*CGF
,
5932 NeonTypeFlags IntTypeFlags
) {
5933 int IsQuad
= IntTypeFlags
.isQuad();
5934 switch (IntTypeFlags
.getEltType()) {
5935 case NeonTypeFlags::Int16
:
5936 return llvm::FixedVectorType::get(CGF
->HalfTy
, (4 << IsQuad
));
5937 case NeonTypeFlags::Int32
:
5938 return llvm::FixedVectorType::get(CGF
->FloatTy
, (2 << IsQuad
));
5939 case NeonTypeFlags::Int64
:
5940 return llvm::FixedVectorType::get(CGF
->DoubleTy
, (1 << IsQuad
));
5942 llvm_unreachable("Type can't be converted to floating-point!");
5946 Value
*CodeGenFunction::EmitNeonSplat(Value
*V
, Constant
*C
,
5947 const ElementCount
&Count
) {
5948 Value
*SV
= llvm::ConstantVector::getSplat(Count
, C
);
5949 return Builder
.CreateShuffleVector(V
, V
, SV
, "lane");
5952 Value
*CodeGenFunction::EmitNeonSplat(Value
*V
, Constant
*C
) {
5953 ElementCount EC
= cast
<llvm::VectorType
>(V
->getType())->getElementCount();
5954 return EmitNeonSplat(V
, C
, EC
);
5957 Value
*CodeGenFunction::EmitNeonCall(Function
*F
, SmallVectorImpl
<Value
*> &Ops
,
5959 unsigned shift
, bool rightshift
) {
5961 for (Function::const_arg_iterator ai
= F
->arg_begin(), ae
= F
->arg_end();
5962 ai
!= ae
; ++ai
, ++j
) {
5963 if (F
->isConstrainedFPIntrinsic())
5964 if (ai
->getType()->isMetadataTy())
5966 if (shift
> 0 && shift
== j
)
5967 Ops
[j
] = EmitNeonShiftVector(Ops
[j
], ai
->getType(), rightshift
);
5969 Ops
[j
] = Builder
.CreateBitCast(Ops
[j
], ai
->getType(), name
);
5972 if (F
->isConstrainedFPIntrinsic())
5973 return Builder
.CreateConstrainedFPCall(F
, Ops
, name
);
5975 return Builder
.CreateCall(F
, Ops
, name
);
5978 Value
*CodeGenFunction::EmitNeonShiftVector(Value
*V
, llvm::Type
*Ty
,
5980 int SV
= cast
<ConstantInt
>(V
)->getSExtValue();
5981 return ConstantInt::get(Ty
, neg
? -SV
: SV
);
5984 // Right-shift a vector by a constant.
5985 Value
*CodeGenFunction::EmitNeonRShiftImm(Value
*Vec
, Value
*Shift
,
5986 llvm::Type
*Ty
, bool usgn
,
5988 llvm::VectorType
*VTy
= cast
<llvm::VectorType
>(Ty
);
5990 int ShiftAmt
= cast
<ConstantInt
>(Shift
)->getSExtValue();
5991 int EltSize
= VTy
->getScalarSizeInBits();
5993 Vec
= Builder
.CreateBitCast(Vec
, Ty
);
5995 // lshr/ashr are undefined when the shift amount is equal to the vector
5997 if (ShiftAmt
== EltSize
) {
5999 // Right-shifting an unsigned value by its size yields 0.
6000 return llvm::ConstantAggregateZero::get(VTy
);
6002 // Right-shifting a signed value by its size is equivalent
6003 // to a shift of size-1.
6005 Shift
= ConstantInt::get(VTy
->getElementType(), ShiftAmt
);
6009 Shift
= EmitNeonShiftVector(Shift
, Ty
, false);
6011 return Builder
.CreateLShr(Vec
, Shift
, name
);
6013 return Builder
.CreateAShr(Vec
, Shift
, name
);
6017 AddRetType
= (1 << 0),
6018 Add1ArgType
= (1 << 1),
6019 Add2ArgTypes
= (1 << 2),
6021 VectorizeRetType
= (1 << 3),
6022 VectorizeArgTypes
= (1 << 4),
6024 InventFloatType
= (1 << 5),
6025 UnsignedAlts
= (1 << 6),
6027 Use64BitVectors
= (1 << 7),
6028 Use128BitVectors
= (1 << 8),
6030 Vectorize1ArgType
= Add1ArgType
| VectorizeArgTypes
,
6031 VectorRet
= AddRetType
| VectorizeRetType
,
6032 VectorRetGetArgs01
=
6033 AddRetType
| Add2ArgTypes
| VectorizeRetType
| VectorizeArgTypes
,
6035 AddRetType
| VectorizeRetType
| Add1ArgType
| InventFloatType
6039 struct ARMVectorIntrinsicInfo
{
6040 const char *NameHint
;
6042 unsigned LLVMIntrinsic
;
6043 unsigned AltLLVMIntrinsic
;
6044 uint64_t TypeModifier
;
6046 bool operator<(unsigned RHSBuiltinID
) const {
6047 return BuiltinID
< RHSBuiltinID
;
6049 bool operator<(const ARMVectorIntrinsicInfo
&TE
) const {
6050 return BuiltinID
< TE
.BuiltinID
;
6053 } // end anonymous namespace
6055 #define NEONMAP0(NameBase) \
6056 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
6058 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6059 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
6060 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
6062 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
6063 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
6064 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
6067 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap
[] = {
6068 NEONMAP1(__a32_vcvt_bf16_f32
, arm_neon_vcvtfp2bf
, 0),
6069 NEONMAP0(splat_lane_v
),
6070 NEONMAP0(splat_laneq_v
),
6071 NEONMAP0(splatq_lane_v
),
6072 NEONMAP0(splatq_laneq_v
),
6073 NEONMAP2(vabd_v
, arm_neon_vabdu
, arm_neon_vabds
, Add1ArgType
| UnsignedAlts
),
6074 NEONMAP2(vabdq_v
, arm_neon_vabdu
, arm_neon_vabds
, Add1ArgType
| UnsignedAlts
),
6075 NEONMAP1(vabs_v
, arm_neon_vabs
, 0),
6076 NEONMAP1(vabsq_v
, arm_neon_vabs
, 0),
6080 NEONMAP1(vaesdq_u8
, arm_neon_aesd
, 0),
6081 NEONMAP1(vaeseq_u8
, arm_neon_aese
, 0),
6082 NEONMAP1(vaesimcq_u8
, arm_neon_aesimc
, 0),
6083 NEONMAP1(vaesmcq_u8
, arm_neon_aesmc
, 0),
6084 NEONMAP1(vbfdot_f32
, arm_neon_bfdot
, 0),
6085 NEONMAP1(vbfdotq_f32
, arm_neon_bfdot
, 0),
6086 NEONMAP1(vbfmlalbq_f32
, arm_neon_bfmlalb
, 0),
6087 NEONMAP1(vbfmlaltq_f32
, arm_neon_bfmlalt
, 0),
6088 NEONMAP1(vbfmmlaq_f32
, arm_neon_bfmmla
, 0),
6089 NEONMAP1(vbsl_v
, arm_neon_vbsl
, AddRetType
),
6090 NEONMAP1(vbslq_v
, arm_neon_vbsl
, AddRetType
),
6091 NEONMAP1(vcadd_rot270_f16
, arm_neon_vcadd_rot270
, Add1ArgType
),
6092 NEONMAP1(vcadd_rot270_f32
, arm_neon_vcadd_rot270
, Add1ArgType
),
6093 NEONMAP1(vcadd_rot90_f16
, arm_neon_vcadd_rot90
, Add1ArgType
),
6094 NEONMAP1(vcadd_rot90_f32
, arm_neon_vcadd_rot90
, Add1ArgType
),
6095 NEONMAP1(vcaddq_rot270_f16
, arm_neon_vcadd_rot270
, Add1ArgType
),
6096 NEONMAP1(vcaddq_rot270_f32
, arm_neon_vcadd_rot270
, Add1ArgType
),
6097 NEONMAP1(vcaddq_rot270_f64
, arm_neon_vcadd_rot270
, Add1ArgType
),
6098 NEONMAP1(vcaddq_rot90_f16
, arm_neon_vcadd_rot90
, Add1ArgType
),
6099 NEONMAP1(vcaddq_rot90_f32
, arm_neon_vcadd_rot90
, Add1ArgType
),
6100 NEONMAP1(vcaddq_rot90_f64
, arm_neon_vcadd_rot90
, Add1ArgType
),
6101 NEONMAP1(vcage_v
, arm_neon_vacge
, 0),
6102 NEONMAP1(vcageq_v
, arm_neon_vacge
, 0),
6103 NEONMAP1(vcagt_v
, arm_neon_vacgt
, 0),
6104 NEONMAP1(vcagtq_v
, arm_neon_vacgt
, 0),
6105 NEONMAP1(vcale_v
, arm_neon_vacge
, 0),
6106 NEONMAP1(vcaleq_v
, arm_neon_vacge
, 0),
6107 NEONMAP1(vcalt_v
, arm_neon_vacgt
, 0),
6108 NEONMAP1(vcaltq_v
, arm_neon_vacgt
, 0),
6117 NEONMAP1(vcls_v
, arm_neon_vcls
, Add1ArgType
),
6118 NEONMAP1(vclsq_v
, arm_neon_vcls
, Add1ArgType
),
6121 NEONMAP1(vclz_v
, ctlz
, Add1ArgType
),
6122 NEONMAP1(vclzq_v
, ctlz
, Add1ArgType
),
6123 NEONMAP1(vcnt_v
, ctpop
, Add1ArgType
),
6124 NEONMAP1(vcntq_v
, ctpop
, Add1ArgType
),
6125 NEONMAP1(vcvt_f16_f32
, arm_neon_vcvtfp2hf
, 0),
6126 NEONMAP0(vcvt_f16_s16
),
6127 NEONMAP0(vcvt_f16_u16
),
6128 NEONMAP1(vcvt_f32_f16
, arm_neon_vcvthf2fp
, 0),
6129 NEONMAP0(vcvt_f32_v
),
6130 NEONMAP1(vcvt_n_f16_s16
, arm_neon_vcvtfxs2fp
, 0),
6131 NEONMAP1(vcvt_n_f16_u16
, arm_neon_vcvtfxu2fp
, 0),
6132 NEONMAP2(vcvt_n_f32_v
, arm_neon_vcvtfxu2fp
, arm_neon_vcvtfxs2fp
, 0),
6133 NEONMAP1(vcvt_n_s16_f16
, arm_neon_vcvtfp2fxs
, 0),
6134 NEONMAP1(vcvt_n_s32_v
, arm_neon_vcvtfp2fxs
, 0),
6135 NEONMAP1(vcvt_n_s64_v
, arm_neon_vcvtfp2fxs
, 0),
6136 NEONMAP1(vcvt_n_u16_f16
, arm_neon_vcvtfp2fxu
, 0),
6137 NEONMAP1(vcvt_n_u32_v
, arm_neon_vcvtfp2fxu
, 0),
6138 NEONMAP1(vcvt_n_u64_v
, arm_neon_vcvtfp2fxu
, 0),
6139 NEONMAP0(vcvt_s16_f16
),
6140 NEONMAP0(vcvt_s32_v
),
6141 NEONMAP0(vcvt_s64_v
),
6142 NEONMAP0(vcvt_u16_f16
),
6143 NEONMAP0(vcvt_u32_v
),
6144 NEONMAP0(vcvt_u64_v
),
6145 NEONMAP1(vcvta_s16_f16
, arm_neon_vcvtas
, 0),
6146 NEONMAP1(vcvta_s32_v
, arm_neon_vcvtas
, 0),
6147 NEONMAP1(vcvta_s64_v
, arm_neon_vcvtas
, 0),
6148 NEONMAP1(vcvta_u16_f16
, arm_neon_vcvtau
, 0),
6149 NEONMAP1(vcvta_u32_v
, arm_neon_vcvtau
, 0),
6150 NEONMAP1(vcvta_u64_v
, arm_neon_vcvtau
, 0),
6151 NEONMAP1(vcvtaq_s16_f16
, arm_neon_vcvtas
, 0),
6152 NEONMAP1(vcvtaq_s32_v
, arm_neon_vcvtas
, 0),
6153 NEONMAP1(vcvtaq_s64_v
, arm_neon_vcvtas
, 0),
6154 NEONMAP1(vcvtaq_u16_f16
, arm_neon_vcvtau
, 0),
6155 NEONMAP1(vcvtaq_u32_v
, arm_neon_vcvtau
, 0),
6156 NEONMAP1(vcvtaq_u64_v
, arm_neon_vcvtau
, 0),
6157 NEONMAP1(vcvth_bf16_f32
, arm_neon_vcvtbfp2bf
, 0),
6158 NEONMAP1(vcvtm_s16_f16
, arm_neon_vcvtms
, 0),
6159 NEONMAP1(vcvtm_s32_v
, arm_neon_vcvtms
, 0),
6160 NEONMAP1(vcvtm_s64_v
, arm_neon_vcvtms
, 0),
6161 NEONMAP1(vcvtm_u16_f16
, arm_neon_vcvtmu
, 0),
6162 NEONMAP1(vcvtm_u32_v
, arm_neon_vcvtmu
, 0),
6163 NEONMAP1(vcvtm_u64_v
, arm_neon_vcvtmu
, 0),
6164 NEONMAP1(vcvtmq_s16_f16
, arm_neon_vcvtms
, 0),
6165 NEONMAP1(vcvtmq_s32_v
, arm_neon_vcvtms
, 0),
6166 NEONMAP1(vcvtmq_s64_v
, arm_neon_vcvtms
, 0),
6167 NEONMAP1(vcvtmq_u16_f16
, arm_neon_vcvtmu
, 0),
6168 NEONMAP1(vcvtmq_u32_v
, arm_neon_vcvtmu
, 0),
6169 NEONMAP1(vcvtmq_u64_v
, arm_neon_vcvtmu
, 0),
6170 NEONMAP1(vcvtn_s16_f16
, arm_neon_vcvtns
, 0),
6171 NEONMAP1(vcvtn_s32_v
, arm_neon_vcvtns
, 0),
6172 NEONMAP1(vcvtn_s64_v
, arm_neon_vcvtns
, 0),
6173 NEONMAP1(vcvtn_u16_f16
, arm_neon_vcvtnu
, 0),
6174 NEONMAP1(vcvtn_u32_v
, arm_neon_vcvtnu
, 0),
6175 NEONMAP1(vcvtn_u64_v
, arm_neon_vcvtnu
, 0),
6176 NEONMAP1(vcvtnq_s16_f16
, arm_neon_vcvtns
, 0),
6177 NEONMAP1(vcvtnq_s32_v
, arm_neon_vcvtns
, 0),
6178 NEONMAP1(vcvtnq_s64_v
, arm_neon_vcvtns
, 0),
6179 NEONMAP1(vcvtnq_u16_f16
, arm_neon_vcvtnu
, 0),
6180 NEONMAP1(vcvtnq_u32_v
, arm_neon_vcvtnu
, 0),
6181 NEONMAP1(vcvtnq_u64_v
, arm_neon_vcvtnu
, 0),
6182 NEONMAP1(vcvtp_s16_f16
, arm_neon_vcvtps
, 0),
6183 NEONMAP1(vcvtp_s32_v
, arm_neon_vcvtps
, 0),
6184 NEONMAP1(vcvtp_s64_v
, arm_neon_vcvtps
, 0),
6185 NEONMAP1(vcvtp_u16_f16
, arm_neon_vcvtpu
, 0),
6186 NEONMAP1(vcvtp_u32_v
, arm_neon_vcvtpu
, 0),
6187 NEONMAP1(vcvtp_u64_v
, arm_neon_vcvtpu
, 0),
6188 NEONMAP1(vcvtpq_s16_f16
, arm_neon_vcvtps
, 0),
6189 NEONMAP1(vcvtpq_s32_v
, arm_neon_vcvtps
, 0),
6190 NEONMAP1(vcvtpq_s64_v
, arm_neon_vcvtps
, 0),
6191 NEONMAP1(vcvtpq_u16_f16
, arm_neon_vcvtpu
, 0),
6192 NEONMAP1(vcvtpq_u32_v
, arm_neon_vcvtpu
, 0),
6193 NEONMAP1(vcvtpq_u64_v
, arm_neon_vcvtpu
, 0),
6194 NEONMAP0(vcvtq_f16_s16
),
6195 NEONMAP0(vcvtq_f16_u16
),
6196 NEONMAP0(vcvtq_f32_v
),
6197 NEONMAP1(vcvtq_n_f16_s16
, arm_neon_vcvtfxs2fp
, 0),
6198 NEONMAP1(vcvtq_n_f16_u16
, arm_neon_vcvtfxu2fp
, 0),
6199 NEONMAP2(vcvtq_n_f32_v
, arm_neon_vcvtfxu2fp
, arm_neon_vcvtfxs2fp
, 0),
6200 NEONMAP1(vcvtq_n_s16_f16
, arm_neon_vcvtfp2fxs
, 0),
6201 NEONMAP1(vcvtq_n_s32_v
, arm_neon_vcvtfp2fxs
, 0),
6202 NEONMAP1(vcvtq_n_s64_v
, arm_neon_vcvtfp2fxs
, 0),
6203 NEONMAP1(vcvtq_n_u16_f16
, arm_neon_vcvtfp2fxu
, 0),
6204 NEONMAP1(vcvtq_n_u32_v
, arm_neon_vcvtfp2fxu
, 0),
6205 NEONMAP1(vcvtq_n_u64_v
, arm_neon_vcvtfp2fxu
, 0),
6206 NEONMAP0(vcvtq_s16_f16
),
6207 NEONMAP0(vcvtq_s32_v
),
6208 NEONMAP0(vcvtq_s64_v
),
6209 NEONMAP0(vcvtq_u16_f16
),
6210 NEONMAP0(vcvtq_u32_v
),
6211 NEONMAP0(vcvtq_u64_v
),
6212 NEONMAP1(vdot_s32
, arm_neon_sdot
, 0),
6213 NEONMAP1(vdot_u32
, arm_neon_udot
, 0),
6214 NEONMAP1(vdotq_s32
, arm_neon_sdot
, 0),
6215 NEONMAP1(vdotq_u32
, arm_neon_udot
, 0),
6220 NEONMAP2(vhadd_v
, arm_neon_vhaddu
, arm_neon_vhadds
, Add1ArgType
| UnsignedAlts
),
6221 NEONMAP2(vhaddq_v
, arm_neon_vhaddu
, arm_neon_vhadds
, Add1ArgType
| UnsignedAlts
),
6222 NEONMAP2(vhsub_v
, arm_neon_vhsubu
, arm_neon_vhsubs
, Add1ArgType
| UnsignedAlts
),
6223 NEONMAP2(vhsubq_v
, arm_neon_vhsubu
, arm_neon_vhsubs
, Add1ArgType
| UnsignedAlts
),
6224 NEONMAP0(vld1_dup_v
),
6225 NEONMAP1(vld1_v
, arm_neon_vld1
, 0),
6226 NEONMAP1(vld1_x2_v
, arm_neon_vld1x2
, 0),
6227 NEONMAP1(vld1_x3_v
, arm_neon_vld1x3
, 0),
6228 NEONMAP1(vld1_x4_v
, arm_neon_vld1x4
, 0),
6229 NEONMAP0(vld1q_dup_v
),
6230 NEONMAP1(vld1q_v
, arm_neon_vld1
, 0),
6231 NEONMAP1(vld1q_x2_v
, arm_neon_vld1x2
, 0),
6232 NEONMAP1(vld1q_x3_v
, arm_neon_vld1x3
, 0),
6233 NEONMAP1(vld1q_x4_v
, arm_neon_vld1x4
, 0),
6234 NEONMAP1(vld2_dup_v
, arm_neon_vld2dup
, 0),
6235 NEONMAP1(vld2_lane_v
, arm_neon_vld2lane
, 0),
6236 NEONMAP1(vld2_v
, arm_neon_vld2
, 0),
6237 NEONMAP1(vld2q_dup_v
, arm_neon_vld2dup
, 0),
6238 NEONMAP1(vld2q_lane_v
, arm_neon_vld2lane
, 0),
6239 NEONMAP1(vld2q_v
, arm_neon_vld2
, 0),
6240 NEONMAP1(vld3_dup_v
, arm_neon_vld3dup
, 0),
6241 NEONMAP1(vld3_lane_v
, arm_neon_vld3lane
, 0),
6242 NEONMAP1(vld3_v
, arm_neon_vld3
, 0),
6243 NEONMAP1(vld3q_dup_v
, arm_neon_vld3dup
, 0),
6244 NEONMAP1(vld3q_lane_v
, arm_neon_vld3lane
, 0),
6245 NEONMAP1(vld3q_v
, arm_neon_vld3
, 0),
6246 NEONMAP1(vld4_dup_v
, arm_neon_vld4dup
, 0),
6247 NEONMAP1(vld4_lane_v
, arm_neon_vld4lane
, 0),
6248 NEONMAP1(vld4_v
, arm_neon_vld4
, 0),
6249 NEONMAP1(vld4q_dup_v
, arm_neon_vld4dup
, 0),
6250 NEONMAP1(vld4q_lane_v
, arm_neon_vld4lane
, 0),
6251 NEONMAP1(vld4q_v
, arm_neon_vld4
, 0),
6252 NEONMAP2(vmax_v
, arm_neon_vmaxu
, arm_neon_vmaxs
, Add1ArgType
| UnsignedAlts
),
6253 NEONMAP1(vmaxnm_v
, arm_neon_vmaxnm
, Add1ArgType
),
6254 NEONMAP1(vmaxnmq_v
, arm_neon_vmaxnm
, Add1ArgType
),
6255 NEONMAP2(vmaxq_v
, arm_neon_vmaxu
, arm_neon_vmaxs
, Add1ArgType
| UnsignedAlts
),
6256 NEONMAP2(vmin_v
, arm_neon_vminu
, arm_neon_vmins
, Add1ArgType
| UnsignedAlts
),
6257 NEONMAP1(vminnm_v
, arm_neon_vminnm
, Add1ArgType
),
6258 NEONMAP1(vminnmq_v
, arm_neon_vminnm
, Add1ArgType
),
6259 NEONMAP2(vminq_v
, arm_neon_vminu
, arm_neon_vmins
, Add1ArgType
| UnsignedAlts
),
6260 NEONMAP1(vmmlaq_s32
, arm_neon_smmla
, 0),
6261 NEONMAP1(vmmlaq_u32
, arm_neon_ummla
, 0),
6264 NEONMAP1(vmul_v
, arm_neon_vmulp
, Add1ArgType
),
6266 NEONMAP1(vmulq_v
, arm_neon_vmulp
, Add1ArgType
),
6267 NEONMAP2(vpadal_v
, arm_neon_vpadalu
, arm_neon_vpadals
, UnsignedAlts
),
6268 NEONMAP2(vpadalq_v
, arm_neon_vpadalu
, arm_neon_vpadals
, UnsignedAlts
),
6269 NEONMAP1(vpadd_v
, arm_neon_vpadd
, Add1ArgType
),
6270 NEONMAP2(vpaddl_v
, arm_neon_vpaddlu
, arm_neon_vpaddls
, UnsignedAlts
),
6271 NEONMAP2(vpaddlq_v
, arm_neon_vpaddlu
, arm_neon_vpaddls
, UnsignedAlts
),
6272 NEONMAP1(vpaddq_v
, arm_neon_vpadd
, Add1ArgType
),
6273 NEONMAP2(vpmax_v
, arm_neon_vpmaxu
, arm_neon_vpmaxs
, Add1ArgType
| UnsignedAlts
),
6274 NEONMAP2(vpmin_v
, arm_neon_vpminu
, arm_neon_vpmins
, Add1ArgType
| UnsignedAlts
),
6275 NEONMAP1(vqabs_v
, arm_neon_vqabs
, Add1ArgType
),
6276 NEONMAP1(vqabsq_v
, arm_neon_vqabs
, Add1ArgType
),
6277 NEONMAP2(vqadd_v
, uadd_sat
, sadd_sat
, Add1ArgType
| UnsignedAlts
),
6278 NEONMAP2(vqaddq_v
, uadd_sat
, sadd_sat
, Add1ArgType
| UnsignedAlts
),
6279 NEONMAP2(vqdmlal_v
, arm_neon_vqdmull
, sadd_sat
, 0),
6280 NEONMAP2(vqdmlsl_v
, arm_neon_vqdmull
, ssub_sat
, 0),
6281 NEONMAP1(vqdmulh_v
, arm_neon_vqdmulh
, Add1ArgType
),
6282 NEONMAP1(vqdmulhq_v
, arm_neon_vqdmulh
, Add1ArgType
),
6283 NEONMAP1(vqdmull_v
, arm_neon_vqdmull
, Add1ArgType
),
6284 NEONMAP2(vqmovn_v
, arm_neon_vqmovnu
, arm_neon_vqmovns
, Add1ArgType
| UnsignedAlts
),
6285 NEONMAP1(vqmovun_v
, arm_neon_vqmovnsu
, Add1ArgType
),
6286 NEONMAP1(vqneg_v
, arm_neon_vqneg
, Add1ArgType
),
6287 NEONMAP1(vqnegq_v
, arm_neon_vqneg
, Add1ArgType
),
6288 NEONMAP1(vqrdmlah_s16
, arm_neon_vqrdmlah
, Add1ArgType
),
6289 NEONMAP1(vqrdmlah_s32
, arm_neon_vqrdmlah
, Add1ArgType
),
6290 NEONMAP1(vqrdmlahq_s16
, arm_neon_vqrdmlah
, Add1ArgType
),
6291 NEONMAP1(vqrdmlahq_s32
, arm_neon_vqrdmlah
, Add1ArgType
),
6292 NEONMAP1(vqrdmlsh_s16
, arm_neon_vqrdmlsh
, Add1ArgType
),
6293 NEONMAP1(vqrdmlsh_s32
, arm_neon_vqrdmlsh
, Add1ArgType
),
6294 NEONMAP1(vqrdmlshq_s16
, arm_neon_vqrdmlsh
, Add1ArgType
),
6295 NEONMAP1(vqrdmlshq_s32
, arm_neon_vqrdmlsh
, Add1ArgType
),
6296 NEONMAP1(vqrdmulh_v
, arm_neon_vqrdmulh
, Add1ArgType
),
6297 NEONMAP1(vqrdmulhq_v
, arm_neon_vqrdmulh
, Add1ArgType
),
6298 NEONMAP2(vqrshl_v
, arm_neon_vqrshiftu
, arm_neon_vqrshifts
, Add1ArgType
| UnsignedAlts
),
6299 NEONMAP2(vqrshlq_v
, arm_neon_vqrshiftu
, arm_neon_vqrshifts
, Add1ArgType
| UnsignedAlts
),
6300 NEONMAP2(vqshl_n_v
, arm_neon_vqshiftu
, arm_neon_vqshifts
, UnsignedAlts
),
6301 NEONMAP2(vqshl_v
, arm_neon_vqshiftu
, arm_neon_vqshifts
, Add1ArgType
| UnsignedAlts
),
6302 NEONMAP2(vqshlq_n_v
, arm_neon_vqshiftu
, arm_neon_vqshifts
, UnsignedAlts
),
6303 NEONMAP2(vqshlq_v
, arm_neon_vqshiftu
, arm_neon_vqshifts
, Add1ArgType
| UnsignedAlts
),
6304 NEONMAP1(vqshlu_n_v
, arm_neon_vqshiftsu
, 0),
6305 NEONMAP1(vqshluq_n_v
, arm_neon_vqshiftsu
, 0),
6306 NEONMAP2(vqsub_v
, usub_sat
, ssub_sat
, Add1ArgType
| UnsignedAlts
),
6307 NEONMAP2(vqsubq_v
, usub_sat
, ssub_sat
, Add1ArgType
| UnsignedAlts
),
6308 NEONMAP1(vraddhn_v
, arm_neon_vraddhn
, Add1ArgType
),
6309 NEONMAP2(vrecpe_v
, arm_neon_vrecpe
, arm_neon_vrecpe
, 0),
6310 NEONMAP2(vrecpeq_v
, arm_neon_vrecpe
, arm_neon_vrecpe
, 0),
6311 NEONMAP1(vrecps_v
, arm_neon_vrecps
, Add1ArgType
),
6312 NEONMAP1(vrecpsq_v
, arm_neon_vrecps
, Add1ArgType
),
6313 NEONMAP2(vrhadd_v
, arm_neon_vrhaddu
, arm_neon_vrhadds
, Add1ArgType
| UnsignedAlts
),
6314 NEONMAP2(vrhaddq_v
, arm_neon_vrhaddu
, arm_neon_vrhadds
, Add1ArgType
| UnsignedAlts
),
6315 NEONMAP1(vrnd_v
, arm_neon_vrintz
, Add1ArgType
),
6316 NEONMAP1(vrnda_v
, arm_neon_vrinta
, Add1ArgType
),
6317 NEONMAP1(vrndaq_v
, arm_neon_vrinta
, Add1ArgType
),
6320 NEONMAP1(vrndm_v
, arm_neon_vrintm
, Add1ArgType
),
6321 NEONMAP1(vrndmq_v
, arm_neon_vrintm
, Add1ArgType
),
6322 NEONMAP1(vrndn_v
, arm_neon_vrintn
, Add1ArgType
),
6323 NEONMAP1(vrndnq_v
, arm_neon_vrintn
, Add1ArgType
),
6324 NEONMAP1(vrndp_v
, arm_neon_vrintp
, Add1ArgType
),
6325 NEONMAP1(vrndpq_v
, arm_neon_vrintp
, Add1ArgType
),
6326 NEONMAP1(vrndq_v
, arm_neon_vrintz
, Add1ArgType
),
6327 NEONMAP1(vrndx_v
, arm_neon_vrintx
, Add1ArgType
),
6328 NEONMAP1(vrndxq_v
, arm_neon_vrintx
, Add1ArgType
),
6329 NEONMAP2(vrshl_v
, arm_neon_vrshiftu
, arm_neon_vrshifts
, Add1ArgType
| UnsignedAlts
),
6330 NEONMAP2(vrshlq_v
, arm_neon_vrshiftu
, arm_neon_vrshifts
, Add1ArgType
| UnsignedAlts
),
6331 NEONMAP2(vrshr_n_v
, arm_neon_vrshiftu
, arm_neon_vrshifts
, UnsignedAlts
),
6332 NEONMAP2(vrshrq_n_v
, arm_neon_vrshiftu
, arm_neon_vrshifts
, UnsignedAlts
),
6333 NEONMAP2(vrsqrte_v
, arm_neon_vrsqrte
, arm_neon_vrsqrte
, 0),
6334 NEONMAP2(vrsqrteq_v
, arm_neon_vrsqrte
, arm_neon_vrsqrte
, 0),
6335 NEONMAP1(vrsqrts_v
, arm_neon_vrsqrts
, Add1ArgType
),
6336 NEONMAP1(vrsqrtsq_v
, arm_neon_vrsqrts
, Add1ArgType
),
6337 NEONMAP1(vrsubhn_v
, arm_neon_vrsubhn
, Add1ArgType
),
6338 NEONMAP1(vsha1su0q_u32
, arm_neon_sha1su0
, 0),
6339 NEONMAP1(vsha1su1q_u32
, arm_neon_sha1su1
, 0),
6340 NEONMAP1(vsha256h2q_u32
, arm_neon_sha256h2
, 0),
6341 NEONMAP1(vsha256hq_u32
, arm_neon_sha256h
, 0),
6342 NEONMAP1(vsha256su0q_u32
, arm_neon_sha256su0
, 0),
6343 NEONMAP1(vsha256su1q_u32
, arm_neon_sha256su1
, 0),
6345 NEONMAP2(vshl_v
, arm_neon_vshiftu
, arm_neon_vshifts
, Add1ArgType
| UnsignedAlts
),
6346 NEONMAP0(vshll_n_v
),
6347 NEONMAP0(vshlq_n_v
),
6348 NEONMAP2(vshlq_v
, arm_neon_vshiftu
, arm_neon_vshifts
, Add1ArgType
| UnsignedAlts
),
6350 NEONMAP0(vshrn_n_v
),
6351 NEONMAP0(vshrq_n_v
),
6352 NEONMAP1(vst1_v
, arm_neon_vst1
, 0),
6353 NEONMAP1(vst1_x2_v
, arm_neon_vst1x2
, 0),
6354 NEONMAP1(vst1_x3_v
, arm_neon_vst1x3
, 0),
6355 NEONMAP1(vst1_x4_v
, arm_neon_vst1x4
, 0),
6356 NEONMAP1(vst1q_v
, arm_neon_vst1
, 0),
6357 NEONMAP1(vst1q_x2_v
, arm_neon_vst1x2
, 0),
6358 NEONMAP1(vst1q_x3_v
, arm_neon_vst1x3
, 0),
6359 NEONMAP1(vst1q_x4_v
, arm_neon_vst1x4
, 0),
6360 NEONMAP1(vst2_lane_v
, arm_neon_vst2lane
, 0),
6361 NEONMAP1(vst2_v
, arm_neon_vst2
, 0),
6362 NEONMAP1(vst2q_lane_v
, arm_neon_vst2lane
, 0),
6363 NEONMAP1(vst2q_v
, arm_neon_vst2
, 0),
6364 NEONMAP1(vst3_lane_v
, arm_neon_vst3lane
, 0),
6365 NEONMAP1(vst3_v
, arm_neon_vst3
, 0),
6366 NEONMAP1(vst3q_lane_v
, arm_neon_vst3lane
, 0),
6367 NEONMAP1(vst3q_v
, arm_neon_vst3
, 0),
6368 NEONMAP1(vst4_lane_v
, arm_neon_vst4lane
, 0),
6369 NEONMAP1(vst4_v
, arm_neon_vst4
, 0),
6370 NEONMAP1(vst4q_lane_v
, arm_neon_vst4lane
, 0),
6371 NEONMAP1(vst4q_v
, arm_neon_vst4
, 0),
6377 NEONMAP1(vusdot_s32
, arm_neon_usdot
, 0),
6378 NEONMAP1(vusdotq_s32
, arm_neon_usdot
, 0),
6379 NEONMAP1(vusmmlaq_s32
, arm_neon_usmmla
, 0),
6386 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap
[] = {
6387 NEONMAP1(__a64_vcvtq_low_bf16_f32
, aarch64_neon_bfcvtn
, 0),
6388 NEONMAP0(splat_lane_v
),
6389 NEONMAP0(splat_laneq_v
),
6390 NEONMAP0(splatq_lane_v
),
6391 NEONMAP0(splatq_laneq_v
),
6392 NEONMAP1(vabs_v
, aarch64_neon_abs
, 0),
6393 NEONMAP1(vabsq_v
, aarch64_neon_abs
, 0),
6396 NEONMAP0(vaddq_p128
),
6398 NEONMAP1(vaesdq_u8
, aarch64_crypto_aesd
, 0),
6399 NEONMAP1(vaeseq_u8
, aarch64_crypto_aese
, 0),
6400 NEONMAP1(vaesimcq_u8
, aarch64_crypto_aesimc
, 0),
6401 NEONMAP1(vaesmcq_u8
, aarch64_crypto_aesmc
, 0),
6402 NEONMAP2(vbcaxq_s16
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6403 NEONMAP2(vbcaxq_s32
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6404 NEONMAP2(vbcaxq_s64
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6405 NEONMAP2(vbcaxq_s8
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6406 NEONMAP2(vbcaxq_u16
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6407 NEONMAP2(vbcaxq_u32
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6408 NEONMAP2(vbcaxq_u64
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6409 NEONMAP2(vbcaxq_u8
, aarch64_crypto_bcaxu
, aarch64_crypto_bcaxs
, Add1ArgType
| UnsignedAlts
),
6410 NEONMAP1(vbfdot_f32
, aarch64_neon_bfdot
, 0),
6411 NEONMAP1(vbfdotq_f32
, aarch64_neon_bfdot
, 0),
6412 NEONMAP1(vbfmlalbq_f32
, aarch64_neon_bfmlalb
, 0),
6413 NEONMAP1(vbfmlaltq_f32
, aarch64_neon_bfmlalt
, 0),
6414 NEONMAP1(vbfmmlaq_f32
, aarch64_neon_bfmmla
, 0),
6415 NEONMAP1(vcadd_rot270_f16
, aarch64_neon_vcadd_rot270
, Add1ArgType
),
6416 NEONMAP1(vcadd_rot270_f32
, aarch64_neon_vcadd_rot270
, Add1ArgType
),
6417 NEONMAP1(vcadd_rot90_f16
, aarch64_neon_vcadd_rot90
, Add1ArgType
),
6418 NEONMAP1(vcadd_rot90_f32
, aarch64_neon_vcadd_rot90
, Add1ArgType
),
6419 NEONMAP1(vcaddq_rot270_f16
, aarch64_neon_vcadd_rot270
, Add1ArgType
),
6420 NEONMAP1(vcaddq_rot270_f32
, aarch64_neon_vcadd_rot270
, Add1ArgType
),
6421 NEONMAP1(vcaddq_rot270_f64
, aarch64_neon_vcadd_rot270
, Add1ArgType
),
6422 NEONMAP1(vcaddq_rot90_f16
, aarch64_neon_vcadd_rot90
, Add1ArgType
),
6423 NEONMAP1(vcaddq_rot90_f32
, aarch64_neon_vcadd_rot90
, Add1ArgType
),
6424 NEONMAP1(vcaddq_rot90_f64
, aarch64_neon_vcadd_rot90
, Add1ArgType
),
6425 NEONMAP1(vcage_v
, aarch64_neon_facge
, 0),
6426 NEONMAP1(vcageq_v
, aarch64_neon_facge
, 0),
6427 NEONMAP1(vcagt_v
, aarch64_neon_facgt
, 0),
6428 NEONMAP1(vcagtq_v
, aarch64_neon_facgt
, 0),
6429 NEONMAP1(vcale_v
, aarch64_neon_facge
, 0),
6430 NEONMAP1(vcaleq_v
, aarch64_neon_facge
, 0),
6431 NEONMAP1(vcalt_v
, aarch64_neon_facgt
, 0),
6432 NEONMAP1(vcaltq_v
, aarch64_neon_facgt
, 0),
6441 NEONMAP1(vcls_v
, aarch64_neon_cls
, Add1ArgType
),
6442 NEONMAP1(vclsq_v
, aarch64_neon_cls
, Add1ArgType
),
6445 NEONMAP1(vclz_v
, ctlz
, Add1ArgType
),
6446 NEONMAP1(vclzq_v
, ctlz
, Add1ArgType
),
6447 NEONMAP1(vcmla_f16
, aarch64_neon_vcmla_rot0
, Add1ArgType
),
6448 NEONMAP1(vcmla_f32
, aarch64_neon_vcmla_rot0
, Add1ArgType
),
6449 NEONMAP1(vcmla_rot180_f16
, aarch64_neon_vcmla_rot180
, Add1ArgType
),
6450 NEONMAP1(vcmla_rot180_f32
, aarch64_neon_vcmla_rot180
, Add1ArgType
),
6451 NEONMAP1(vcmla_rot270_f16
, aarch64_neon_vcmla_rot270
, Add1ArgType
),
6452 NEONMAP1(vcmla_rot270_f32
, aarch64_neon_vcmla_rot270
, Add1ArgType
),
6453 NEONMAP1(vcmla_rot90_f16
, aarch64_neon_vcmla_rot90
, Add1ArgType
),
6454 NEONMAP1(vcmla_rot90_f32
, aarch64_neon_vcmla_rot90
, Add1ArgType
),
6455 NEONMAP1(vcmlaq_f16
, aarch64_neon_vcmla_rot0
, Add1ArgType
),
6456 NEONMAP1(vcmlaq_f32
, aarch64_neon_vcmla_rot0
, Add1ArgType
),
6457 NEONMAP1(vcmlaq_f64
, aarch64_neon_vcmla_rot0
, Add1ArgType
),
6458 NEONMAP1(vcmlaq_rot180_f16
, aarch64_neon_vcmla_rot180
, Add1ArgType
),
6459 NEONMAP1(vcmlaq_rot180_f32
, aarch64_neon_vcmla_rot180
, Add1ArgType
),
6460 NEONMAP1(vcmlaq_rot180_f64
, aarch64_neon_vcmla_rot180
, Add1ArgType
),
6461 NEONMAP1(vcmlaq_rot270_f16
, aarch64_neon_vcmla_rot270
, Add1ArgType
),
6462 NEONMAP1(vcmlaq_rot270_f32
, aarch64_neon_vcmla_rot270
, Add1ArgType
),
6463 NEONMAP1(vcmlaq_rot270_f64
, aarch64_neon_vcmla_rot270
, Add1ArgType
),
6464 NEONMAP1(vcmlaq_rot90_f16
, aarch64_neon_vcmla_rot90
, Add1ArgType
),
6465 NEONMAP1(vcmlaq_rot90_f32
, aarch64_neon_vcmla_rot90
, Add1ArgType
),
6466 NEONMAP1(vcmlaq_rot90_f64
, aarch64_neon_vcmla_rot90
, Add1ArgType
),
6467 NEONMAP1(vcnt_v
, ctpop
, Add1ArgType
),
6468 NEONMAP1(vcntq_v
, ctpop
, Add1ArgType
),
6469 NEONMAP1(vcvt_f16_f32
, aarch64_neon_vcvtfp2hf
, 0),
6470 NEONMAP0(vcvt_f16_s16
),
6471 NEONMAP0(vcvt_f16_u16
),
6472 NEONMAP1(vcvt_f32_f16
, aarch64_neon_vcvthf2fp
, 0),
6473 NEONMAP0(vcvt_f32_v
),
6474 NEONMAP1(vcvt_n_f16_s16
, aarch64_neon_vcvtfxs2fp
, 0),
6475 NEONMAP1(vcvt_n_f16_u16
, aarch64_neon_vcvtfxu2fp
, 0),
6476 NEONMAP2(vcvt_n_f32_v
, aarch64_neon_vcvtfxu2fp
, aarch64_neon_vcvtfxs2fp
, 0),
6477 NEONMAP2(vcvt_n_f64_v
, aarch64_neon_vcvtfxu2fp
, aarch64_neon_vcvtfxs2fp
, 0),
6478 NEONMAP1(vcvt_n_s16_f16
, aarch64_neon_vcvtfp2fxs
, 0),
6479 NEONMAP1(vcvt_n_s32_v
, aarch64_neon_vcvtfp2fxs
, 0),
6480 NEONMAP1(vcvt_n_s64_v
, aarch64_neon_vcvtfp2fxs
, 0),
6481 NEONMAP1(vcvt_n_u16_f16
, aarch64_neon_vcvtfp2fxu
, 0),
6482 NEONMAP1(vcvt_n_u32_v
, aarch64_neon_vcvtfp2fxu
, 0),
6483 NEONMAP1(vcvt_n_u64_v
, aarch64_neon_vcvtfp2fxu
, 0),
6484 NEONMAP0(vcvtq_f16_s16
),
6485 NEONMAP0(vcvtq_f16_u16
),
6486 NEONMAP0(vcvtq_f32_v
),
6487 NEONMAP1(vcvtq_high_bf16_f32
, aarch64_neon_bfcvtn2
, 0),
6488 NEONMAP1(vcvtq_n_f16_s16
, aarch64_neon_vcvtfxs2fp
, 0),
6489 NEONMAP1(vcvtq_n_f16_u16
, aarch64_neon_vcvtfxu2fp
, 0),
6490 NEONMAP2(vcvtq_n_f32_v
, aarch64_neon_vcvtfxu2fp
, aarch64_neon_vcvtfxs2fp
, 0),
6491 NEONMAP2(vcvtq_n_f64_v
, aarch64_neon_vcvtfxu2fp
, aarch64_neon_vcvtfxs2fp
, 0),
6492 NEONMAP1(vcvtq_n_s16_f16
, aarch64_neon_vcvtfp2fxs
, 0),
6493 NEONMAP1(vcvtq_n_s32_v
, aarch64_neon_vcvtfp2fxs
, 0),
6494 NEONMAP1(vcvtq_n_s64_v
, aarch64_neon_vcvtfp2fxs
, 0),
6495 NEONMAP1(vcvtq_n_u16_f16
, aarch64_neon_vcvtfp2fxu
, 0),
6496 NEONMAP1(vcvtq_n_u32_v
, aarch64_neon_vcvtfp2fxu
, 0),
6497 NEONMAP1(vcvtq_n_u64_v
, aarch64_neon_vcvtfp2fxu
, 0),
6498 NEONMAP1(vcvtx_f32_v
, aarch64_neon_fcvtxn
, AddRetType
| Add1ArgType
),
6499 NEONMAP1(vdot_s32
, aarch64_neon_sdot
, 0),
6500 NEONMAP1(vdot_u32
, aarch64_neon_udot
, 0),
6501 NEONMAP1(vdotq_s32
, aarch64_neon_sdot
, 0),
6502 NEONMAP1(vdotq_u32
, aarch64_neon_udot
, 0),
6503 NEONMAP2(veor3q_s16
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6504 NEONMAP2(veor3q_s32
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6505 NEONMAP2(veor3q_s64
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6506 NEONMAP2(veor3q_s8
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6507 NEONMAP2(veor3q_u16
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6508 NEONMAP2(veor3q_u32
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6509 NEONMAP2(veor3q_u64
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6510 NEONMAP2(veor3q_u8
, aarch64_crypto_eor3u
, aarch64_crypto_eor3s
, Add1ArgType
| UnsignedAlts
),
6515 NEONMAP1(vfmlal_high_f16
, aarch64_neon_fmlal2
, 0),
6516 NEONMAP1(vfmlal_low_f16
, aarch64_neon_fmlal
, 0),
6517 NEONMAP1(vfmlalq_high_f16
, aarch64_neon_fmlal2
, 0),
6518 NEONMAP1(vfmlalq_low_f16
, aarch64_neon_fmlal
, 0),
6519 NEONMAP1(vfmlsl_high_f16
, aarch64_neon_fmlsl2
, 0),
6520 NEONMAP1(vfmlsl_low_f16
, aarch64_neon_fmlsl
, 0),
6521 NEONMAP1(vfmlslq_high_f16
, aarch64_neon_fmlsl2
, 0),
6522 NEONMAP1(vfmlslq_low_f16
, aarch64_neon_fmlsl
, 0),
6523 NEONMAP2(vhadd_v
, aarch64_neon_uhadd
, aarch64_neon_shadd
, Add1ArgType
| UnsignedAlts
),
6524 NEONMAP2(vhaddq_v
, aarch64_neon_uhadd
, aarch64_neon_shadd
, Add1ArgType
| UnsignedAlts
),
6525 NEONMAP2(vhsub_v
, aarch64_neon_uhsub
, aarch64_neon_shsub
, Add1ArgType
| UnsignedAlts
),
6526 NEONMAP2(vhsubq_v
, aarch64_neon_uhsub
, aarch64_neon_shsub
, Add1ArgType
| UnsignedAlts
),
6527 NEONMAP1(vld1_x2_v
, aarch64_neon_ld1x2
, 0),
6528 NEONMAP1(vld1_x3_v
, aarch64_neon_ld1x3
, 0),
6529 NEONMAP1(vld1_x4_v
, aarch64_neon_ld1x4
, 0),
6530 NEONMAP1(vld1q_x2_v
, aarch64_neon_ld1x2
, 0),
6531 NEONMAP1(vld1q_x3_v
, aarch64_neon_ld1x3
, 0),
6532 NEONMAP1(vld1q_x4_v
, aarch64_neon_ld1x4
, 0),
6533 NEONMAP1(vmmlaq_s32
, aarch64_neon_smmla
, 0),
6534 NEONMAP1(vmmlaq_u32
, aarch64_neon_ummla
, 0),
6537 NEONMAP1(vmul_v
, aarch64_neon_pmul
, Add1ArgType
),
6538 NEONMAP1(vmulq_v
, aarch64_neon_pmul
, Add1ArgType
),
6539 NEONMAP1(vpadd_v
, aarch64_neon_addp
, Add1ArgType
),
6540 NEONMAP2(vpaddl_v
, aarch64_neon_uaddlp
, aarch64_neon_saddlp
, UnsignedAlts
),
6541 NEONMAP2(vpaddlq_v
, aarch64_neon_uaddlp
, aarch64_neon_saddlp
, UnsignedAlts
),
6542 NEONMAP1(vpaddq_v
, aarch64_neon_addp
, Add1ArgType
),
6543 NEONMAP1(vqabs_v
, aarch64_neon_sqabs
, Add1ArgType
),
6544 NEONMAP1(vqabsq_v
, aarch64_neon_sqabs
, Add1ArgType
),
6545 NEONMAP2(vqadd_v
, aarch64_neon_uqadd
, aarch64_neon_sqadd
, Add1ArgType
| UnsignedAlts
),
6546 NEONMAP2(vqaddq_v
, aarch64_neon_uqadd
, aarch64_neon_sqadd
, Add1ArgType
| UnsignedAlts
),
6547 NEONMAP2(vqdmlal_v
, aarch64_neon_sqdmull
, aarch64_neon_sqadd
, 0),
6548 NEONMAP2(vqdmlsl_v
, aarch64_neon_sqdmull
, aarch64_neon_sqsub
, 0),
6549 NEONMAP1(vqdmulh_lane_v
, aarch64_neon_sqdmulh_lane
, 0),
6550 NEONMAP1(vqdmulh_laneq_v
, aarch64_neon_sqdmulh_laneq
, 0),
6551 NEONMAP1(vqdmulh_v
, aarch64_neon_sqdmulh
, Add1ArgType
),
6552 NEONMAP1(vqdmulhq_lane_v
, aarch64_neon_sqdmulh_lane
, 0),
6553 NEONMAP1(vqdmulhq_laneq_v
, aarch64_neon_sqdmulh_laneq
, 0),
6554 NEONMAP1(vqdmulhq_v
, aarch64_neon_sqdmulh
, Add1ArgType
),
6555 NEONMAP1(vqdmull_v
, aarch64_neon_sqdmull
, Add1ArgType
),
6556 NEONMAP2(vqmovn_v
, aarch64_neon_uqxtn
, aarch64_neon_sqxtn
, Add1ArgType
| UnsignedAlts
),
6557 NEONMAP1(vqmovun_v
, aarch64_neon_sqxtun
, Add1ArgType
),
6558 NEONMAP1(vqneg_v
, aarch64_neon_sqneg
, Add1ArgType
),
6559 NEONMAP1(vqnegq_v
, aarch64_neon_sqneg
, Add1ArgType
),
6560 NEONMAP1(vqrdmlah_s16
, aarch64_neon_sqrdmlah
, Add1ArgType
),
6561 NEONMAP1(vqrdmlah_s32
, aarch64_neon_sqrdmlah
, Add1ArgType
),
6562 NEONMAP1(vqrdmlahq_s16
, aarch64_neon_sqrdmlah
, Add1ArgType
),
6563 NEONMAP1(vqrdmlahq_s32
, aarch64_neon_sqrdmlah
, Add1ArgType
),
6564 NEONMAP1(vqrdmlsh_s16
, aarch64_neon_sqrdmlsh
, Add1ArgType
),
6565 NEONMAP1(vqrdmlsh_s32
, aarch64_neon_sqrdmlsh
, Add1ArgType
),
6566 NEONMAP1(vqrdmlshq_s16
, aarch64_neon_sqrdmlsh
, Add1ArgType
),
6567 NEONMAP1(vqrdmlshq_s32
, aarch64_neon_sqrdmlsh
, Add1ArgType
),
6568 NEONMAP1(vqrdmulh_lane_v
, aarch64_neon_sqrdmulh_lane
, 0),
6569 NEONMAP1(vqrdmulh_laneq_v
, aarch64_neon_sqrdmulh_laneq
, 0),
6570 NEONMAP1(vqrdmulh_v
, aarch64_neon_sqrdmulh
, Add1ArgType
),
6571 NEONMAP1(vqrdmulhq_lane_v
, aarch64_neon_sqrdmulh_lane
, 0),
6572 NEONMAP1(vqrdmulhq_laneq_v
, aarch64_neon_sqrdmulh_laneq
, 0),
6573 NEONMAP1(vqrdmulhq_v
, aarch64_neon_sqrdmulh
, Add1ArgType
),
6574 NEONMAP2(vqrshl_v
, aarch64_neon_uqrshl
, aarch64_neon_sqrshl
, Add1ArgType
| UnsignedAlts
),
6575 NEONMAP2(vqrshlq_v
, aarch64_neon_uqrshl
, aarch64_neon_sqrshl
, Add1ArgType
| UnsignedAlts
),
6576 NEONMAP2(vqshl_n_v
, aarch64_neon_uqshl
, aarch64_neon_sqshl
, UnsignedAlts
),
6577 NEONMAP2(vqshl_v
, aarch64_neon_uqshl
, aarch64_neon_sqshl
, Add1ArgType
| UnsignedAlts
),
6578 NEONMAP2(vqshlq_n_v
, aarch64_neon_uqshl
, aarch64_neon_sqshl
,UnsignedAlts
),
6579 NEONMAP2(vqshlq_v
, aarch64_neon_uqshl
, aarch64_neon_sqshl
, Add1ArgType
| UnsignedAlts
),
6580 NEONMAP1(vqshlu_n_v
, aarch64_neon_sqshlu
, 0),
6581 NEONMAP1(vqshluq_n_v
, aarch64_neon_sqshlu
, 0),
6582 NEONMAP2(vqsub_v
, aarch64_neon_uqsub
, aarch64_neon_sqsub
, Add1ArgType
| UnsignedAlts
),
6583 NEONMAP2(vqsubq_v
, aarch64_neon_uqsub
, aarch64_neon_sqsub
, Add1ArgType
| UnsignedAlts
),
6584 NEONMAP1(vraddhn_v
, aarch64_neon_raddhn
, Add1ArgType
),
6585 NEONMAP1(vrax1q_u64
, aarch64_crypto_rax1
, 0),
6586 NEONMAP2(vrecpe_v
, aarch64_neon_frecpe
, aarch64_neon_urecpe
, 0),
6587 NEONMAP2(vrecpeq_v
, aarch64_neon_frecpe
, aarch64_neon_urecpe
, 0),
6588 NEONMAP1(vrecps_v
, aarch64_neon_frecps
, Add1ArgType
),
6589 NEONMAP1(vrecpsq_v
, aarch64_neon_frecps
, Add1ArgType
),
6590 NEONMAP2(vrhadd_v
, aarch64_neon_urhadd
, aarch64_neon_srhadd
, Add1ArgType
| UnsignedAlts
),
6591 NEONMAP2(vrhaddq_v
, aarch64_neon_urhadd
, aarch64_neon_srhadd
, Add1ArgType
| UnsignedAlts
),
6592 NEONMAP1(vrnd32x_f32
, aarch64_neon_frint32x
, Add1ArgType
),
6593 NEONMAP1(vrnd32x_f64
, aarch64_neon_frint32x
, Add1ArgType
),
6594 NEONMAP1(vrnd32xq_f32
, aarch64_neon_frint32x
, Add1ArgType
),
6595 NEONMAP1(vrnd32xq_f64
, aarch64_neon_frint32x
, Add1ArgType
),
6596 NEONMAP1(vrnd32z_f32
, aarch64_neon_frint32z
, Add1ArgType
),
6597 NEONMAP1(vrnd32z_f64
, aarch64_neon_frint32z
, Add1ArgType
),
6598 NEONMAP1(vrnd32zq_f32
, aarch64_neon_frint32z
, Add1ArgType
),
6599 NEONMAP1(vrnd32zq_f64
, aarch64_neon_frint32z
, Add1ArgType
),
6600 NEONMAP1(vrnd64x_f32
, aarch64_neon_frint64x
, Add1ArgType
),
6601 NEONMAP1(vrnd64x_f64
, aarch64_neon_frint64x
, Add1ArgType
),
6602 NEONMAP1(vrnd64xq_f32
, aarch64_neon_frint64x
, Add1ArgType
),
6603 NEONMAP1(vrnd64xq_f64
, aarch64_neon_frint64x
, Add1ArgType
),
6604 NEONMAP1(vrnd64z_f32
, aarch64_neon_frint64z
, Add1ArgType
),
6605 NEONMAP1(vrnd64z_f64
, aarch64_neon_frint64z
, Add1ArgType
),
6606 NEONMAP1(vrnd64zq_f32
, aarch64_neon_frint64z
, Add1ArgType
),
6607 NEONMAP1(vrnd64zq_f64
, aarch64_neon_frint64z
, Add1ArgType
),
6610 NEONMAP2(vrshl_v
, aarch64_neon_urshl
, aarch64_neon_srshl
, Add1ArgType
| UnsignedAlts
),
6611 NEONMAP2(vrshlq_v
, aarch64_neon_urshl
, aarch64_neon_srshl
, Add1ArgType
| UnsignedAlts
),
6612 NEONMAP2(vrshr_n_v
, aarch64_neon_urshl
, aarch64_neon_srshl
, UnsignedAlts
),
6613 NEONMAP2(vrshrq_n_v
, aarch64_neon_urshl
, aarch64_neon_srshl
, UnsignedAlts
),
6614 NEONMAP2(vrsqrte_v
, aarch64_neon_frsqrte
, aarch64_neon_ursqrte
, 0),
6615 NEONMAP2(vrsqrteq_v
, aarch64_neon_frsqrte
, aarch64_neon_ursqrte
, 0),
6616 NEONMAP1(vrsqrts_v
, aarch64_neon_frsqrts
, Add1ArgType
),
6617 NEONMAP1(vrsqrtsq_v
, aarch64_neon_frsqrts
, Add1ArgType
),
6618 NEONMAP1(vrsubhn_v
, aarch64_neon_rsubhn
, Add1ArgType
),
6619 NEONMAP1(vsha1su0q_u32
, aarch64_crypto_sha1su0
, 0),
6620 NEONMAP1(vsha1su1q_u32
, aarch64_crypto_sha1su1
, 0),
6621 NEONMAP1(vsha256h2q_u32
, aarch64_crypto_sha256h2
, 0),
6622 NEONMAP1(vsha256hq_u32
, aarch64_crypto_sha256h
, 0),
6623 NEONMAP1(vsha256su0q_u32
, aarch64_crypto_sha256su0
, 0),
6624 NEONMAP1(vsha256su1q_u32
, aarch64_crypto_sha256su1
, 0),
6625 NEONMAP1(vsha512h2q_u64
, aarch64_crypto_sha512h2
, 0),
6626 NEONMAP1(vsha512hq_u64
, aarch64_crypto_sha512h
, 0),
6627 NEONMAP1(vsha512su0q_u64
, aarch64_crypto_sha512su0
, 0),
6628 NEONMAP1(vsha512su1q_u64
, aarch64_crypto_sha512su1
, 0),
6630 NEONMAP2(vshl_v
, aarch64_neon_ushl
, aarch64_neon_sshl
, Add1ArgType
| UnsignedAlts
),
6631 NEONMAP0(vshll_n_v
),
6632 NEONMAP0(vshlq_n_v
),
6633 NEONMAP2(vshlq_v
, aarch64_neon_ushl
, aarch64_neon_sshl
, Add1ArgType
| UnsignedAlts
),
6635 NEONMAP0(vshrn_n_v
),
6636 NEONMAP0(vshrq_n_v
),
6637 NEONMAP1(vsm3partw1q_u32
, aarch64_crypto_sm3partw1
, 0),
6638 NEONMAP1(vsm3partw2q_u32
, aarch64_crypto_sm3partw2
, 0),
6639 NEONMAP1(vsm3ss1q_u32
, aarch64_crypto_sm3ss1
, 0),
6640 NEONMAP1(vsm3tt1aq_u32
, aarch64_crypto_sm3tt1a
, 0),
6641 NEONMAP1(vsm3tt1bq_u32
, aarch64_crypto_sm3tt1b
, 0),
6642 NEONMAP1(vsm3tt2aq_u32
, aarch64_crypto_sm3tt2a
, 0),
6643 NEONMAP1(vsm3tt2bq_u32
, aarch64_crypto_sm3tt2b
, 0),
6644 NEONMAP1(vsm4ekeyq_u32
, aarch64_crypto_sm4ekey
, 0),
6645 NEONMAP1(vsm4eq_u32
, aarch64_crypto_sm4e
, 0),
6646 NEONMAP1(vst1_x2_v
, aarch64_neon_st1x2
, 0),
6647 NEONMAP1(vst1_x3_v
, aarch64_neon_st1x3
, 0),
6648 NEONMAP1(vst1_x4_v
, aarch64_neon_st1x4
, 0),
6649 NEONMAP1(vst1q_x2_v
, aarch64_neon_st1x2
, 0),
6650 NEONMAP1(vst1q_x3_v
, aarch64_neon_st1x3
, 0),
6651 NEONMAP1(vst1q_x4_v
, aarch64_neon_st1x4
, 0),
6655 NEONMAP1(vusdot_s32
, aarch64_neon_usdot
, 0),
6656 NEONMAP1(vusdotq_s32
, aarch64_neon_usdot
, 0),
6657 NEONMAP1(vusmmlaq_s32
, aarch64_neon_usmmla
, 0),
6658 NEONMAP1(vxarq_u64
, aarch64_crypto_xar
, 0),
6661 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap
[] = {
6662 NEONMAP1(vabdd_f64
, aarch64_sisd_fabd
, Add1ArgType
),
6663 NEONMAP1(vabds_f32
, aarch64_sisd_fabd
, Add1ArgType
),
6664 NEONMAP1(vabsd_s64
, aarch64_neon_abs
, Add1ArgType
),
6665 NEONMAP1(vaddlv_s32
, aarch64_neon_saddlv
, AddRetType
| Add1ArgType
),
6666 NEONMAP1(vaddlv_u32
, aarch64_neon_uaddlv
, AddRetType
| Add1ArgType
),
6667 NEONMAP1(vaddlvq_s32
, aarch64_neon_saddlv
, AddRetType
| Add1ArgType
),
6668 NEONMAP1(vaddlvq_u32
, aarch64_neon_uaddlv
, AddRetType
| Add1ArgType
),
6669 NEONMAP1(vaddv_f32
, aarch64_neon_faddv
, AddRetType
| Add1ArgType
),
6670 NEONMAP1(vaddv_s32
, aarch64_neon_saddv
, AddRetType
| Add1ArgType
),
6671 NEONMAP1(vaddv_u32
, aarch64_neon_uaddv
, AddRetType
| Add1ArgType
),
6672 NEONMAP1(vaddvq_f32
, aarch64_neon_faddv
, AddRetType
| Add1ArgType
),
6673 NEONMAP1(vaddvq_f64
, aarch64_neon_faddv
, AddRetType
| Add1ArgType
),
6674 NEONMAP1(vaddvq_s32
, aarch64_neon_saddv
, AddRetType
| Add1ArgType
),
6675 NEONMAP1(vaddvq_s64
, aarch64_neon_saddv
, AddRetType
| Add1ArgType
),
6676 NEONMAP1(vaddvq_u32
, aarch64_neon_uaddv
, AddRetType
| Add1ArgType
),
6677 NEONMAP1(vaddvq_u64
, aarch64_neon_uaddv
, AddRetType
| Add1ArgType
),
6678 NEONMAP1(vcaged_f64
, aarch64_neon_facge
, AddRetType
| Add1ArgType
),
6679 NEONMAP1(vcages_f32
, aarch64_neon_facge
, AddRetType
| Add1ArgType
),
6680 NEONMAP1(vcagtd_f64
, aarch64_neon_facgt
, AddRetType
| Add1ArgType
),
6681 NEONMAP1(vcagts_f32
, aarch64_neon_facgt
, AddRetType
| Add1ArgType
),
6682 NEONMAP1(vcaled_f64
, aarch64_neon_facge
, AddRetType
| Add1ArgType
),
6683 NEONMAP1(vcales_f32
, aarch64_neon_facge
, AddRetType
| Add1ArgType
),
6684 NEONMAP1(vcaltd_f64
, aarch64_neon_facgt
, AddRetType
| Add1ArgType
),
6685 NEONMAP1(vcalts_f32
, aarch64_neon_facgt
, AddRetType
| Add1ArgType
),
6686 NEONMAP1(vcvtad_s64_f64
, aarch64_neon_fcvtas
, AddRetType
| Add1ArgType
),
6687 NEONMAP1(vcvtad_u64_f64
, aarch64_neon_fcvtau
, AddRetType
| Add1ArgType
),
6688 NEONMAP1(vcvtas_s32_f32
, aarch64_neon_fcvtas
, AddRetType
| Add1ArgType
),
6689 NEONMAP1(vcvtas_u32_f32
, aarch64_neon_fcvtau
, AddRetType
| Add1ArgType
),
6690 NEONMAP1(vcvtd_n_f64_s64
, aarch64_neon_vcvtfxs2fp
, AddRetType
| Add1ArgType
),
6691 NEONMAP1(vcvtd_n_f64_u64
, aarch64_neon_vcvtfxu2fp
, AddRetType
| Add1ArgType
),
6692 NEONMAP1(vcvtd_n_s64_f64
, aarch64_neon_vcvtfp2fxs
, AddRetType
| Add1ArgType
),
6693 NEONMAP1(vcvtd_n_u64_f64
, aarch64_neon_vcvtfp2fxu
, AddRetType
| Add1ArgType
),
6694 NEONMAP1(vcvtd_s64_f64
, aarch64_neon_fcvtzs
, AddRetType
| Add1ArgType
),
6695 NEONMAP1(vcvtd_u64_f64
, aarch64_neon_fcvtzu
, AddRetType
| Add1ArgType
),
6696 NEONMAP1(vcvth_bf16_f32
, aarch64_neon_bfcvt
, 0),
6697 NEONMAP1(vcvtmd_s64_f64
, aarch64_neon_fcvtms
, AddRetType
| Add1ArgType
),
6698 NEONMAP1(vcvtmd_u64_f64
, aarch64_neon_fcvtmu
, AddRetType
| Add1ArgType
),
6699 NEONMAP1(vcvtms_s32_f32
, aarch64_neon_fcvtms
, AddRetType
| Add1ArgType
),
6700 NEONMAP1(vcvtms_u32_f32
, aarch64_neon_fcvtmu
, AddRetType
| Add1ArgType
),
6701 NEONMAP1(vcvtnd_s64_f64
, aarch64_neon_fcvtns
, AddRetType
| Add1ArgType
),
6702 NEONMAP1(vcvtnd_u64_f64
, aarch64_neon_fcvtnu
, AddRetType
| Add1ArgType
),
6703 NEONMAP1(vcvtns_s32_f32
, aarch64_neon_fcvtns
, AddRetType
| Add1ArgType
),
6704 NEONMAP1(vcvtns_u32_f32
, aarch64_neon_fcvtnu
, AddRetType
| Add1ArgType
),
6705 NEONMAP1(vcvtpd_s64_f64
, aarch64_neon_fcvtps
, AddRetType
| Add1ArgType
),
6706 NEONMAP1(vcvtpd_u64_f64
, aarch64_neon_fcvtpu
, AddRetType
| Add1ArgType
),
6707 NEONMAP1(vcvtps_s32_f32
, aarch64_neon_fcvtps
, AddRetType
| Add1ArgType
),
6708 NEONMAP1(vcvtps_u32_f32
, aarch64_neon_fcvtpu
, AddRetType
| Add1ArgType
),
6709 NEONMAP1(vcvts_n_f32_s32
, aarch64_neon_vcvtfxs2fp
, AddRetType
| Add1ArgType
),
6710 NEONMAP1(vcvts_n_f32_u32
, aarch64_neon_vcvtfxu2fp
, AddRetType
| Add1ArgType
),
6711 NEONMAP1(vcvts_n_s32_f32
, aarch64_neon_vcvtfp2fxs
, AddRetType
| Add1ArgType
),
6712 NEONMAP1(vcvts_n_u32_f32
, aarch64_neon_vcvtfp2fxu
, AddRetType
| Add1ArgType
),
6713 NEONMAP1(vcvts_s32_f32
, aarch64_neon_fcvtzs
, AddRetType
| Add1ArgType
),
6714 NEONMAP1(vcvts_u32_f32
, aarch64_neon_fcvtzu
, AddRetType
| Add1ArgType
),
6715 NEONMAP1(vcvtxd_f32_f64
, aarch64_sisd_fcvtxn
, 0),
6716 NEONMAP1(vmaxnmv_f32
, aarch64_neon_fmaxnmv
, AddRetType
| Add1ArgType
),
6717 NEONMAP1(vmaxnmvq_f32
, aarch64_neon_fmaxnmv
, AddRetType
| Add1ArgType
),
6718 NEONMAP1(vmaxnmvq_f64
, aarch64_neon_fmaxnmv
, AddRetType
| Add1ArgType
),
6719 NEONMAP1(vmaxv_f32
, aarch64_neon_fmaxv
, AddRetType
| Add1ArgType
),
6720 NEONMAP1(vmaxv_s32
, aarch64_neon_smaxv
, AddRetType
| Add1ArgType
),
6721 NEONMAP1(vmaxv_u32
, aarch64_neon_umaxv
, AddRetType
| Add1ArgType
),
6722 NEONMAP1(vmaxvq_f32
, aarch64_neon_fmaxv
, AddRetType
| Add1ArgType
),
6723 NEONMAP1(vmaxvq_f64
, aarch64_neon_fmaxv
, AddRetType
| Add1ArgType
),
6724 NEONMAP1(vmaxvq_s32
, aarch64_neon_smaxv
, AddRetType
| Add1ArgType
),
6725 NEONMAP1(vmaxvq_u32
, aarch64_neon_umaxv
, AddRetType
| Add1ArgType
),
6726 NEONMAP1(vminnmv_f32
, aarch64_neon_fminnmv
, AddRetType
| Add1ArgType
),
6727 NEONMAP1(vminnmvq_f32
, aarch64_neon_fminnmv
, AddRetType
| Add1ArgType
),
6728 NEONMAP1(vminnmvq_f64
, aarch64_neon_fminnmv
, AddRetType
| Add1ArgType
),
6729 NEONMAP1(vminv_f32
, aarch64_neon_fminv
, AddRetType
| Add1ArgType
),
6730 NEONMAP1(vminv_s32
, aarch64_neon_sminv
, AddRetType
| Add1ArgType
),
6731 NEONMAP1(vminv_u32
, aarch64_neon_uminv
, AddRetType
| Add1ArgType
),
6732 NEONMAP1(vminvq_f32
, aarch64_neon_fminv
, AddRetType
| Add1ArgType
),
6733 NEONMAP1(vminvq_f64
, aarch64_neon_fminv
, AddRetType
| Add1ArgType
),
6734 NEONMAP1(vminvq_s32
, aarch64_neon_sminv
, AddRetType
| Add1ArgType
),
6735 NEONMAP1(vminvq_u32
, aarch64_neon_uminv
, AddRetType
| Add1ArgType
),
6736 NEONMAP1(vmull_p64
, aarch64_neon_pmull64
, 0),
6737 NEONMAP1(vmulxd_f64
, aarch64_neon_fmulx
, Add1ArgType
),
6738 NEONMAP1(vmulxs_f32
, aarch64_neon_fmulx
, Add1ArgType
),
6739 NEONMAP1(vpaddd_s64
, aarch64_neon_uaddv
, AddRetType
| Add1ArgType
),
6740 NEONMAP1(vpaddd_u64
, aarch64_neon_uaddv
, AddRetType
| Add1ArgType
),
6741 NEONMAP1(vpmaxnmqd_f64
, aarch64_neon_fmaxnmv
, AddRetType
| Add1ArgType
),
6742 NEONMAP1(vpmaxnms_f32
, aarch64_neon_fmaxnmv
, AddRetType
| Add1ArgType
),
6743 NEONMAP1(vpmaxqd_f64
, aarch64_neon_fmaxv
, AddRetType
| Add1ArgType
),
6744 NEONMAP1(vpmaxs_f32
, aarch64_neon_fmaxv
, AddRetType
| Add1ArgType
),
6745 NEONMAP1(vpminnmqd_f64
, aarch64_neon_fminnmv
, AddRetType
| Add1ArgType
),
6746 NEONMAP1(vpminnms_f32
, aarch64_neon_fminnmv
, AddRetType
| Add1ArgType
),
6747 NEONMAP1(vpminqd_f64
, aarch64_neon_fminv
, AddRetType
| Add1ArgType
),
6748 NEONMAP1(vpmins_f32
, aarch64_neon_fminv
, AddRetType
| Add1ArgType
),
6749 NEONMAP1(vqabsb_s8
, aarch64_neon_sqabs
, Vectorize1ArgType
| Use64BitVectors
),
6750 NEONMAP1(vqabsd_s64
, aarch64_neon_sqabs
, Add1ArgType
),
6751 NEONMAP1(vqabsh_s16
, aarch64_neon_sqabs
, Vectorize1ArgType
| Use64BitVectors
),
6752 NEONMAP1(vqabss_s32
, aarch64_neon_sqabs
, Add1ArgType
),
6753 NEONMAP1(vqaddb_s8
, aarch64_neon_sqadd
, Vectorize1ArgType
| Use64BitVectors
),
6754 NEONMAP1(vqaddb_u8
, aarch64_neon_uqadd
, Vectorize1ArgType
| Use64BitVectors
),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
  NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
  // FP16 scalar intrinsics go here.
  NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
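
// Note that the scalar (SISD) table above, like the other intrinsic maps in
// this file, must stay sorted by builtin ID: findARMVectorIntrinsicInMap
// below asserts llvm::is_sorted() once and then binary-searches the table with
// llvm::lower_bound. The type-modifier flags (Add1ArgType, VectorRet,
// Use64BitVectors, ...) tell LookupNeonLLVMIntrinsic how to build the
// overloaded intrinsic signature for each entry.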
// Some intrinsics are equivalent for codegen.
static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
  { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, },
  { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, },
  { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, },
  { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, },
  { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, },
  { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
  { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
  { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
  { NEON::BI__builtin_neon_vbsl_f16, NEON::BI__builtin_neon_vbsl_v, },
  { NEON::BI__builtin_neon_vbslq_f16, NEON::BI__builtin_neon_vbslq_v, },
  { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
  { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
  { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
  { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, },
  { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, },
  { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, },
  { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, },
  { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, },
  { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, },
  { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, },
  { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, },
  { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, },
  { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, },
  { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, },
  { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, },
  { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
  { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
  { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
  { NEON::BI__builtin_neon_vext_f16, NEON::BI__builtin_neon_vext_v, },
  { NEON::BI__builtin_neon_vextq_f16, NEON::BI__builtin_neon_vextq_v, },
  { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
  { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
  { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
  { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, },
  { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, },
  { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, },
  { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v },
  { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v },
  { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v },
  { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v },
  { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v },
  { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v },
  { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v },
  { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v },
  { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v },
  { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v },
  { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v },
  { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v },
  { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v },
  { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v },
  { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v },
  { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v },
  { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v },
  { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v },
  { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v },
  { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v },
  { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v },
  { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v },
  { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v },
  { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v },
  { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v },
  { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v },
  { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v },
  { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v },
  { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v },
  { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v },
  { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, },
  { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, },
  { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, },
  { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, },
  { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, },
  { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, },
  { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, },
  { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, },
  { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, },
  { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, },
  { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, },
  { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, },
  { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, },
  { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, },
  { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, },
  { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, },
  { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, },
  { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, },
  { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, },
  { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, },
  { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, },
  { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, },
  { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, },
  { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, },
  { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, },
  { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, },
  { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, },
  { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, },
  { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, },
  { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, },
  { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, },
  { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, },
  { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, },
  { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, },
  { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, },
  { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, },
  { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, },
  { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, },
  { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, },
  { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, },
  { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, },
  { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, },
  { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, },
  { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, },
  { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v },
  { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v },
  { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v },
  { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v },
  { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v },
  { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v },
  { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v },
  { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v },
  { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v },
  { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v },
  { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v },
  { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v },
  { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v },
  { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v },
  { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v },
  { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v },
  { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v },
  { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v },
  { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v },
  { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
  { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
  { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
  { NEON::BI__builtin_neon_vtrn_f16, NEON::BI__builtin_neon_vtrn_v, },
  { NEON::BI__builtin_neon_vtrnq_f16, NEON::BI__builtin_neon_vtrnq_v, },
  { NEON::BI__builtin_neon_vuzp_f16, NEON::BI__builtin_neon_vuzp_v, },
  { NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
  { NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
  { NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
  // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
  // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
  // arbitrary one to be handled as the canonical variation.
  { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
  { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
  { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
  { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
  { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
  { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
  { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
  { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
  { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
  { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
  { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
  { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
};
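
// Each pair above maps a type-specific builtin ID onto the canonical "_v"
// (or _s64) variant that already has codegen support, so only one entry per
// operation needs to be handled; the equivalence table is presumably consulted
// to rewrite the builtin ID before the regular intrinsic-map lookup happens.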
#define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier)                         \
  {                                                                            \
    #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0,   \
        TypeModifier                                                           \
  }

#define SVEMAP2(NameBase, TypeModifier)                                        \
  { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};

#undef SVEMAP1
#undef SVEMAP2

#define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier)                         \
  {                                                                            \
    #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0,   \
        TypeModifier                                                           \
  }

#define SMEMAP2(NameBase, TypeModifier)                                        \
  { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier }
static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
#define GET_SME_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sme_builtin_cg.inc"
#undef GET_SME_LLVM_INTRINSIC_MAP
};

#undef SMEMAP1
#undef SMEMAP2

static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
static bool AArch64SVEIntrinsicsProvenSorted = false;
static bool AArch64SMEIntrinsicsProvenSorted = false;
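
// The *ProvenSorted flags above cache the one-time is_sorted() check done in
// findARMVectorIntrinsicInMap, so each map is only verified the first time it
// is searched (and only in asserts-enabled builds).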
static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
                            unsigned BuiltinID, bool &MapProvenSorted) {

#ifndef NDEBUG
  if (!MapProvenSorted) {
    assert(llvm::is_sorted(IntrinsicMap));
    MapProvenSorted = true;
  }
#endif

  const ARMVectorIntrinsicInfo *Builtin =
      llvm::lower_bound(IntrinsicMap, BuiltinID);

  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
    return Builtin;

  return nullptr;
}
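
// Illustrative call (sketch only): each map is paired with its own flag, e.g.
//   const ARMVectorIntrinsicInfo *Info = findARMVectorIntrinsicInMap(
//       AArch64SISDIntrinsicMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
// and a non-null result is then dispatched on via its LLVMIntrinsic,
// TypeModifier and NameHint fields (see EmitCommonNeonSISDBuiltinExpr below).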
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    if (Modifier & VectorizeRetType)
      Ty = llvm::FixedVectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);

    Tys.push_back(Ty);
  }

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::FixedVectorType::get(ArgType, Elts);
  }

  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);

  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);

  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}
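
// Worked example of the modifier flags: for an entry such as vqaddh_s16
// (Vectorize1ArgType | Use64BitVectors) with an i16 argument, VectorSize is
// 64, so the overload type pushed is <4 x i16>; a plain Add1ArgType entry such
// as vqaddd_s64 pushes the scalar i64 type unchanged.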
static Value *EmitCommonNeonSISDBuiltinExpr(
    CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
    SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
  unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned int Int = SISDInfo.LLVMIntrinsic;
  unsigned Modifier = SISDInfo.TypeModifier;
  const char *s = SISDInfo.NameHint;

  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vcled_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcales_f32:
  case NEON::BI__builtin_neon_vcaled_f64:
  case NEON::BI__builtin_neon_vcalts_f32:
  case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exist, cmle is actually a cmge
    // with swapped operands. The table gives us the right intrinsic but we
    // still need to do the swap.
    std::swap(Ops[0], Ops[1]);
    break;
  }

  assert(Int && "Generic code assumes a valid intrinsic");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  const Expr *Arg = E->getArg(0);
  llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
  Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);

  int j = 0;
  ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j) {
    llvm::Type *ArgTy = ai->getType();
    if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
        ArgTy->getPrimitiveSizeInBits())
      continue;

    assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
    // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
    // it before inserting.
    Ops[j] = CGF.Builder.CreateTruncOrBitCast(
        Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
    Ops[j] =
        CGF.Builder.CreateInsertElement(PoisonValue::get(ArgTy), Ops[j], C0);
  }

  Value *Result = CGF.EmitNeonCall(F, Ops, s);
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  if (ResultType->getPrimitiveSizeInBits().getFixedValue() <
      Result->getType()->getPrimitiveSizeInBits().getFixedValue())
    return CGF.Builder.CreateExtractElement(Result, C0);

  return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
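
// The loop above bridges scalar builtins onto vector intrinsics: operands that
// are narrower than the intrinsic's vector parameter are truncated if needed
// and inserted into lane 0 of a poison vector, and when the intrinsic returns
// a wider vector than the builtin's scalar result, lane 0 is extracted back
// out.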
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {
  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  std::optional<llvm::APSInt> NeonTypeConst =
      Arg->getIntegerConstantExpr(getContext());
  if (!NeonTypeConst)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(NeonTypeConst->getZExtValue());
  bool Usgn = Type.isUnsigned();
  bool Quad = Type.isQuad();
  const bool HasLegalHalfType = getTarget().hasLegalHalfType();
  const bool AllowBFloatArgsAndRet =
      getTargetHooks().getABIInfo().allowBFloatArgsAndRet();

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  auto getAlignmentValue32 = [&](Address addr) -> Value * {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  unsigned Int = LLVMIntrinsic;
  if ((Modifier & UnsignedAlts) && !Usgn)
    Int = AltLLVMIntrinsic;

  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_splat_lane_v:
  case NEON::BI__builtin_neon_splat_laneq_v:
  case NEON::BI__builtin_neon_splatq_lane_v:
  case NEON::BI__builtin_neon_splatq_laneq_v: {
    auto NumElements = VTy->getElementCount();
    if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
      NumElements = NumElements * 2;
    if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
      NumElements = NumElements.divideCoefficientBy(2);

    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
  }
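
  // EmitNeonSplat (sketch of intent): it emits a shufflevector whose mask
  // repeats the requested lane, so the splat_lane/splat_laneq builtins reduce
  // to a single shuffle after the element-count adjustment above.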
  case NEON::BI__builtin_neon_vpadd_v:
  case NEON::BI__builtin_neon_vpaddq_v:
    // We don't allow fp/int overloading of intrinsics.
    if (VTy->getElementType()->isFloatingPointTy() &&
        Int == Intrinsic::aarch64_neon_addp)
      Int = Intrinsic::aarch64_neon_faddp;
    break;
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v:
    if (VTy->getElementType()->isFloatingPointTy())
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  case NEON::BI__builtin_neon_vadd_v:
  case NEON::BI__builtin_neon_vaddq_v: {
    llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Ops[0], Ty);
  }
  case NEON::BI__builtin_neon_vaddhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case NEON::BI__builtin_neon_vcale_v:
  case NEON::BI__builtin_neon_vcaleq_v:
  case NEON::BI__builtin_neon_vcalt_v:
  case NEON::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcage_v:
  case NEON::BI__builtin_neon_vcageq_v:
  case NEON::BI__builtin_neon_vcagt_v:
  case NEON::BI__builtin_neon_vcagtq_v: {
    llvm::Type *Ty;
    switch (VTy->getScalarSizeInBits()) {
    default: llvm_unreachable("unexpected type");
    case 32:
      Ty = FloatTy;
      break;
    case 64:
      Ty = DoubleTy;
      break;
    case 16:
      Ty = HalfTy;
      break;
    }
    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
    llvm::Type *Tys[] = { VTy, VecFlt };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, NameHint);
  }
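
  // For the absolute-value comparisons above, only one operand order exists in
  // hardware (facge/facgt); vcale/vcalt are handled by swapping the operands
  // and falling through, and the intrinsic is overloaded on both the integer
  // result vector and the floating-point input vector.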
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
                                         ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
                                         ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
                                         ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
                                         ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
                                         ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vclz_v:
  case NEON::BI__builtin_neon_vclzq_v:
    // We generate target-independent intrinsic, which needs a second argument
    // for whether or not clz of zero is undefined; on ARM it isn't.
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    break;
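
  // The compare-with-zero cases above all funnel into
  // EmitAArch64CompareBuiltinExpr, which picks the FP or integer predicate
  // based on the operand's (bitcast-peeled) element type and sign-extends the
  // i1 comparison result to the full lane width.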
  case NEON::BI__builtin_neon_vcvt_f32_v:
  case NEON::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f16_s16:
  case NEON::BI__builtin_neon_vcvt_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_f16_u16:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
                     HasLegalHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_n_f16_s16:
  case NEON::BI__builtin_neon_vcvt_n_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_f32_v:
  case NEON::BI__builtin_neon_vcvt_n_f64_v:
  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_s16_f16:
  case NEON::BI__builtin_neon_vcvt_n_s32_v:
  case NEON::BI__builtin_neon_vcvt_n_u16_f16:
  case NEON::BI__builtin_neon_vcvt_n_u32_v:
  case NEON::BI__builtin_neon_vcvt_n_s64_v:
  case NEON::BI__builtin_neon_vcvt_n_u64_v:
  case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_f16:
  case NEON::BI__builtin_neon_vcvt_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_u16_f16: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvta_s16_f16:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvta_u16_f16:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_f16:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_f16:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v:
  case NEON::BI__builtin_neon_vcvtn_s16_f16:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u16_f16:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_f16:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_f16:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v:
  case NEON::BI__builtin_neon_vcvtp_s16_f16:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u16_f16:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_f16:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_f16:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v:
  case NEON::BI__builtin_neon_vcvtm_s16_f16:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u16_f16:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_f16:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_f16:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
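
  // The conversion cases above split into three shapes: the rounding-mode
  // conversions (vcvta/vcvtn/vcvtp/vcvtm) and vcvtx go through target
  // intrinsics overloaded on the integer and FP vector types, the fixed-point
  // vcvt_n_* forms do the same, and the plain vcvt_* forms lower directly to
  // fptoui/fptosi (or uitofp/sitofp) instructions.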
  case NEON::BI__builtin_neon_vext_v:
  case NEON::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<int, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(i+CV);

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
  }
  case NEON::BI__builtin_neon_vfma_v:
  case NEON::BI__builtin_neon_vfmaq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
  }
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v:
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v:
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v:
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v:
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v:
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Value *Align = getAlignmentValue32(PtrOp1);
    Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = PoisonValue::get(Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    LoadInst *Ld = Builder.CreateLoad(PtrOp0);
    llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v:
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v:
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    for (unsigned I = 2; I < Ops.size() - 1; ++I)
      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
    Ops.push_back(getAlignmentValue32(PtrOp1));
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
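
  // The vldN / vldN_dup / vldN_lane cases above all follow the same shape:
  // call the target intrinsic (which returns a struct of N vectors) and then
  // store that aggregate through the result pointer in Ops[0] with
  // CreateDefaultAlignedStore.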
  case NEON::BI__builtin_neon_vmovl_v: {
    llvm::FixedVectorType *DTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case NEON::BI__builtin_neon_vmovn_v: {
    llvm::FixedVectorType *QTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along that can
    // see through such movement this leads to bad CodeGen. So we need an
    // intrinsic for now.
    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
        llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vpaddl_v:
  case NEON::BI__builtin_neon_vpaddlq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
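
  // For the pairwise widening ops above (vpadal/vpaddl), the narrow source
  // type is rebuilt as "twice as many elements of half the width", and both
  // the wide result type and that narrow type are passed as overload types.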
  case NEON::BI__builtin_neon_vqdmlal_v:
  case NEON::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
    Ops.resize(2);
    return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqdmulh_lane_v:
  case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
    auto *RTy = cast<llvm::FixedVectorType>(Ty);
    if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
        BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
      RTy = llvm::FixedVectorType::get(RTy->getElementType(),
                                       RTy->getNumElements() * 2);
    llvm::Type *Tys[2] = {
        RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ false))};
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqdmulh_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
    llvm::Type *Tys[2] = {
        Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                            /*isQuad*/ true))};
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqshl_n_v:
  case NEON::BI__builtin_neon_vqshlq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case NEON::BI__builtin_neon_vqshlu_n_v:
  case NEON::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
                        1, false);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
  case NEON::BI__builtin_neon_vrsqrte_v:
  case NEON::BI__builtin_neon_vrsqrteq_v:
    Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v:
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_nearbyint
              : Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrshr_n_v:
  case NEON::BI__builtin_neon_vrshrq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
                        1, true);
  case NEON::BI__builtin_neon_vsha512hq_u64:
  case NEON::BI__builtin_neon_vsha512h2q_u64:
  case NEON::BI__builtin_neon_vsha512su0q_u64:
  case NEON::BI__builtin_neon_vsha512su1q_u64: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vshl_n_v:
  case NEON::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
                             "vshl_n");
  case NEON::BI__builtin_neon_vshll_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (Usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
  case NEON::BI__builtin_neon_vshrn_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
    if (Usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
  case NEON::BI__builtin_neon_vshr_n_v:
  case NEON::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
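
  // The shift-by-immediate cases above avoid target intrinsics where they can:
  // EmitNeonShiftVector splats the immediate into a vector so plain shl /
  // lshr / ashr IR suffices, with vshll_n first widening the input according
  // to Usgn and vshrn_n truncating the result back down.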
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v:
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v:
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v:
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v:
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v:
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    llvm::Type *Tys[] = {Int8PtrTy, Ty};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3partw1q_u32:
  case NEON::BI__builtin_neon_vsm3partw2q_u32:
  case NEON::BI__builtin_neon_vsm3ss1q_u32:
  case NEON::BI__builtin_neon_vsm4ekeyq_u32:
  case NEON::BI__builtin_neon_vsm4eq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3tt1aq_u32:
  case NEON::BI__builtin_neon_vsm3tt1bq_u32:
  case NEON::BI__builtin_neon_vsm3tt2aq_u32:
  case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vst1_x2_v:
  case NEON::BI__builtin_neon_vst1q_x2_v:
  case NEON::BI__builtin_neon_vst1_x3_v:
  case NEON::BI__builtin_neon_vst1q_x3_v:
  case NEON::BI__builtin_neon_vst1_x4_v:
  case NEON::BI__builtin_neon_vst1q_x4_v: {
    // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
    // in AArch64 it comes last. We may want to stick to one or another.
    if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
        Arch == llvm::Triple::aarch64_32) {
      llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
      return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
    }
    llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsubhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vtst_v:
  case NEON::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vxarq_u64: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
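
  // vtrn/vuzp/vzip above return two vectors: each shufflevector result is
  // stored into consecutive slots of the result pointer (GEP index vi), which
  // is how the two-vector return of the builtin is materialized in memory.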
  case NEON::BI__builtin_neon_vdot_s32:
  case NEON::BI__builtin_neon_vdot_u32:
  case NEON::BI__builtin_neon_vdotq_s32:
  case NEON::BI__builtin_neon_vdotq_u32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
  }
  case NEON::BI__builtin_neon_vfmlal_low_f16:
  case NEON::BI__builtin_neon_vfmlalq_low_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
  }
  case NEON::BI__builtin_neon_vfmlsl_low_f16:
  case NEON::BI__builtin_neon_vfmlslq_low_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
  }
  case NEON::BI__builtin_neon_vfmlal_high_f16:
  case NEON::BI__builtin_neon_vfmlalq_high_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
  }
  case NEON::BI__builtin_neon_vfmlsl_high_f16:
  case NEON::BI__builtin_neon_vfmlslq_high_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
  }
  case NEON::BI__builtin_neon_vmmlaq_s32:
  case NEON::BI__builtin_neon_vmmlaq_u32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
  }
  case NEON::BI__builtin_neon_vusmmlaq_s32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
  }
  case NEON::BI__builtin_neon_vusdot_s32:
  case NEON::BI__builtin_neon_vusdotq_s32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
  }
  case NEON::BI__builtin_neon_vbfdot_f32:
  case NEON::BI__builtin_neon_vbfdotq_f32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
  }
  case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
    llvm::Type *Tys[1] = { Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvtfp2bf");
  }
  }

  assert(Int && "Expected valid intrinsic number");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);

  Value *Result = EmitNeonCall(F, Ops, NameHint);
  llvm::Type *ResultType = ConvertType(E->getType());
  // AArch64 intrinsic one-element vector type cast to
  // scalar type expected by the builtin
  return Builder.CreateBitCast(Result, ResultType, NameHint);
}
Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
    Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
    const CmpInst::Predicate Ip, const Twine &Name) {
  llvm::Type *OTy = Op->getType();

  // FIXME: this is utterly horrific. We should not be looking at previous
  // codegen context to find out what needs doing. Unfortunately TableGen
  // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
  // (and friends).
  if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
    OTy = BI->getOperand(0)->getType();

  Op = Builder.CreateBitCast(Op, OTy);
  if (OTy->getScalarType()->isFloatingPointTy()) {
    if (Fp == CmpInst::FCMP_OEQ)
      Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
    else
      Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy));
  } else {
    Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
  }
  return Builder.CreateSExt(Op, Ty, Name);
}
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Value *ExtOp, Value *IndexOp,
                                 llvm::Type *ResTy, unsigned IntID,
                                 const char *Name) {
  SmallVector<Value *, 2> TblOps;
  if (ExtOp)
    TblOps.push_back(ExtOp);

  // Build a vector containing sequential number like (0, 1, 2, ..., 15)
  SmallVector<int, 16> Indices;
  auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
  for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
    Indices.push_back(2*i);
    Indices.push_back(2*i+1);
  }

  int PairPos = 0, End = Ops.size() - 1;
  while (PairPos < End) {
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     Ops[PairPos+1], Indices,
                                                     Name));
    PairPos += 2;
  }

  // If there's an odd number of 64-bit lookup table, fill the high 64-bit
  // of the 128-bit lookup table with zero.
  if (PairPos == End) {
    Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
    TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
                                                     ZeroTbl, Indices, Name));
  }

  Function *TblF;
  TblOps.push_back(IndexOp);
  TblF = CGF.CGM.getIntrinsic(IntID, ResTy);

  return CGF.EmitNeonCall(TblF, TblOps, Name);
}
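// Illustrative note (added for exposition, not part of the original source):
// the interleaving mask built above is simply {0, 1, 2, ..., 2*e-1}, so the
// shufflevector concatenates each pair of 64-bit table halves into a single
// 128-bit table register before the AArch64 TBL/TBX intrinsic is emitted.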
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
  unsigned Value;
  switch (BuiltinID) {
  default:
    return nullptr;
  case clang::ARM::BI__builtin_arm_nop:
    Value = 0;
    break;
  case clang::ARM::BI__builtin_arm_yield:
  case clang::ARM::BI__yield:
    Value = 1;
    break;
  case clang::ARM::BI__builtin_arm_wfe:
  case clang::ARM::BI__wfe:
    Value = 2;
    break;
  case clang::ARM::BI__builtin_arm_wfi:
  case clang::ARM::BI__wfi:
    Value = 3;
    break;
  case clang::ARM::BI__builtin_arm_sev:
  case clang::ARM::BI__sev:
    Value = 4;
    break;
  case clang::ARM::BI__builtin_arm_sevl:
  case clang::ARM::BI__sevl:
    Value = 5;
    break;
  }

  return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
                            llvm::ConstantInt::get(Int32Ty, Value));
}
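// Example (added for exposition): __builtin_arm_wfi() / __wfi() is expected to
// lower to a call of the ARM hint intrinsic with immediate 3, i.e. roughly
//   call void @llvm.arm.hint(i32 3)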
enum SpecialRegisterAccessKind {
  NormalRead,
  VolatileRead,
  Write,
};
// Generates the IR for __builtin_read_exec_*.
// Lowers the builtin to amdgcn_ballot intrinsic.
static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
                                      llvm::Type *RegisterType,
                                      llvm::Type *ValueType, bool isExecHi) {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  CodeGen::CodeGenModule &CGM = CGF.CGM;

  Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {RegisterType});
  llvm::Value *Call = Builder.CreateCall(F, {Builder.getInt1(true)});

  if (isExecHi) {
    Value *Rt2 = Builder.CreateLShr(Call, 32);
    Rt2 = Builder.CreateTrunc(Rt2, CGF.Int32Ty);
    return Rt2;
  }

  return Call;
}
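// Sketch of the expected lowering (added for exposition): the *_hi variant of
// the read-exec builtins becomes a wave-wide ballot of 'true' followed by a
// logical shift right by 32 and a truncation to i32; the other variants return
// the ballot result directly.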
// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
                                         const CallExpr *E,
                                         llvm::Type *RegisterType,
                                         llvm::Type *ValueType,
                                         SpecialRegisterAccessKind AccessKind,
                                         StringRef SysReg = "") {
  // write and register intrinsics only support 32, 64 and 128 bit operations.
  assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) ||
          RegisterType->isIntegerTy(128)) &&
         "Unsupported size for register.");

  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  CodeGen::CodeGenModule &CGM = CGF.CGM;
  LLVMContext &Context = CGM.getLLVMContext();

  if (SysReg.empty()) {
    const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
    SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
  }

  llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
  llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
  llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

  llvm::Type *Types[] = { RegisterType };

  bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
  assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
            && "Can't fit 64-bit value in 32-bit register");

  if (AccessKind != Write) {
    assert(AccessKind == NormalRead || AccessKind == VolatileRead);
    llvm::Function *F = CGM.getIntrinsic(
        AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
                                   : llvm::Intrinsic::read_register,
        Types);
    llvm::Value *Call = Builder.CreateCall(F, Metadata);

    if (MixedTypes)
      // Read into 64 bit register and then truncate result to 32 bit.
      return Builder.CreateTrunc(Call, ValueType);

    if (ValueType->isPointerTy())
      // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
      return Builder.CreateIntToPtr(Call, ValueType);

    return Call;
  }

  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
  if (MixedTypes) {
    // Extend 32 bit write value to 64 bit to pass to write.
    ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
    return Builder.CreateCall(F, { Metadata, ArgValue });
  }

  if (ValueType->isPointerTy()) {
    // Have VoidPtrTy ArgValue but want to return an i32/i64.
    ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
    return Builder.CreateCall(F, { Metadata, ArgValue });
  }

  return Builder.CreateCall(F, { Metadata, ArgValue });
}
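// Illustrative example (added for exposition, not from the original source): a
// volatile 32-bit read such as __builtin_arm_rsr("cpsr") is expected to lower
// to roughly
//   %0 = call i32 @llvm.read_volatile_register.i32(metadata !{!"cpsr"})
// while the pointer variants additionally go through inttoptr/ptrtoint.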
/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {
  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vget_lane_bf16:
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vget_lane_f32:
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vgetq_lane_bf16:
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vduph_lane_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_bf16:
  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_bf16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_bf16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
  case NEON::BI__builtin_neon_vsha1h_u32:
  case NEON::BI__builtin_neon_vsha1cq_u32:
  case NEON::BI__builtin_neon_vsha1pq_u32:
  case NEON::BI__builtin_neon_vsha1mq_u32:
  case NEON::BI__builtin_neon_vcvth_bf16_f32:
  case clang::ARM::BI_MoveToCoprocessor:
  case clang::ARM::BI_MoveToCoprocessor2:
    return false;
  }
  return true;
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E,
                                           ReturnValueSlot ReturnValue,
                                           llvm::Triple::ArchType Arch) {
  if (auto Hint = GetValueForARMHint(BuiltinID))
    return Hint;

  if (BuiltinID == clang::ARM::BI__emit) {
    bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(VoidTy, /*Variadic=*/false);

    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();

    llvm::InlineAsm *Emit =
        IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true)
                : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true);

    return Builder.CreateCall(Emit);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
    Value *Option = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = EmitScalarExpr(E->getArg(1));
    Value *IsData = EmitScalarExpr(E->getArg(2));

    // Locality is not supported on ARM target
    Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);

    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
      BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
    Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
    if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
      Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    return Res;
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
                              "cls");
  }

  if (BuiltinID == clang::ARM::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
      BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mcrr:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
      break;
    case clang::ARM::BI__builtin_arm_mcrr2:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
      break;
    }

    // MCRR{2} instruction has 5 operands but
    // the intrinsic has 4 because Rt and Rt2
    // are represented as a single unsigned 64
    // bit integer in the intrinsic definition
    // but internally it's represented as 2 32
    // bit integers.

    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
    Value *CRm = EmitScalarExpr(E->getArg(3));

    Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
    Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
    Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
    Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);

    return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
      BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mrrc:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
      break;
    case clang::ARM::BI__builtin_arm_mrrc2:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
      break;
    }

    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *CRm = EmitScalarExpr(E->getArg(2));
    Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});

    // Returns an unsigned 64 bit integer, represented
    // as two 32 bit integers.

    Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
    Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
    Rt = Builder.CreateZExt(Rt, Int64Ty);
    Rt1 = Builder.CreateZExt(Rt1, Int64Ty);

    Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
    RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
    RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);

    return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
       getContext().getTypeSize(E->getType()) == 64) ||
      BuiltinID == clang::ARM::BI__ldrexd) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_ldaex:
      F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
      break;
    case clang::ARM::BI__builtin_arm_ldrexd:
    case clang::ARM::BI__builtin_arm_ldrex:
    case clang::ARM::BI__ldrexd:
      F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
      break;
    }

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
                                    "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }
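  // Worked example (added for exposition): for a 64-bit exclusive load the
  // intrinsic returns a pair of i32 values; the code above rebuilds the 64-bit
  // result as (zext(Val0) << 32) | zext(Val1), where Val0 and Val1 are the two
  // extracted halves, before bitcasting to the builtin's result type.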
  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
      BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
                                                       : Intrinsic::arm_ldrex,
        LoadAddr->getType());
    CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
    Val->addParamAttr(
        0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);
    else {
      llvm::Type *IntResTy = llvm::IntegerType::get(
          getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
      return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                                   RealResTy);
    }
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
        BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
                                                       : Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Address LdPtr = Tmp.withElementType(STy);
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
      BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
                                                       : Intrinsic::arm_strex,
        StoreAddr->getType());

    CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
    CI->addParamAttr(
        1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
    return CI;
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
    return Builder.CreateCall(F);
  }
  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::ARM::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::arm_crc32b; break;
  case clang::ARM::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
  case clang::ARM::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::arm_crc32h; break;
  case clang::ARM::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
  case clang::ARM::BI__builtin_arm_crc32w:
  case clang::ARM::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::arm_crc32w; break;
  case clang::ARM::BI__builtin_arm_crc32cw:
  case clang::ARM::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));

    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics, hence we need different codegen for these cases.
    if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
        BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
      Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
      Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
      Value *Arg1b = Builder.CreateLShr(Arg1, C1);
      Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
      return Builder.CreateCall(F, {Res, Arg1b});
    } else {
      Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      return Builder.CreateCall(F, {Arg0, Arg1});
    }
  }
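  // Worked example (added for exposition): __builtin_arm_crc32d(crc, x) with a
  // 64-bit x is emitted as two chained 32-bit CRC calls, conceptually
  // crc32w(crc32w(crc, trunc(x)), trunc(x >> 32)).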
  if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {

    SpecialRegisterAccessKind AccessKind = Write;
    if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
      AccessKind = VolatileRead;

    bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
                            BuiltinID == clang::ARM::BI__builtin_arm_wsrp;

    bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
                   BuiltinID == clang::ARM::BI__builtin_arm_wsr64;

    llvm::Type *ValueType;
    llvm::Type *RegisterType;
    if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
      RegisterType = Int32Ty;
    } else if (Is64Bit) {
      ValueType = RegisterType = Int64Ty;
    } else {
      ValueType = RegisterType = Int32Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      AccessKind);
  }

  if (BuiltinID == ARM::BI__builtin_sponentry) {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }
  // Handle MSVC intrinsics before argument evaluation to prevent double
  // evaluation.
  if (std::optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
    return EmitMSVCBuiltinExpr(*MsvcIntId, E);

  // Deal with MVE builtins
  if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
    return Result;
  // Handle CDE builtins
  if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
    return Result;

  // Some intrinsics are equivalent - if they are use the base intrinsic ID.
  auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
    return P.first == BuiltinID;
  });
  if (It != end(NEONEquivalentIntrinsicMap))
    BuiltinID = It->second;

  // Find out if any arguments are required to be integer constant
  // expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  auto getAlignmentValue32 = [&](Address addr) -> Value* {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  Address PtrOp0 = Address::invalid();
  Address PtrOp1 = Address::invalid();
  SmallVector<Value*, 4> Ops;
  bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
  unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
  for (unsigned i = 0, e = NumArgs; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld1_v:
      case NEON::BI__builtin_neon_vld1q_v:
      case NEON::BI__builtin_neon_vld1q_lane_v:
      case NEON::BI__builtin_neon_vld1_lane_v:
      case NEON::BI__builtin_neon_vld1_dup_v:
      case NEON::BI__builtin_neon_vld1q_dup_v:
      case NEON::BI__builtin_neon_vst1_v:
      case NEON::BI__builtin_neon_vst1q_v:
      case NEON::BI__builtin_neon_vst1q_lane_v:
      case NEON::BI__builtin_neon_vst1_lane_v:
      case NEON::BI__builtin_neon_vst2_v:
      case NEON::BI__builtin_neon_vst2q_v:
      case NEON::BI__builtin_neon_vst2_lane_v:
      case NEON::BI__builtin_neon_vst2q_lane_v:
      case NEON::BI__builtin_neon_vst3_v:
      case NEON::BI__builtin_neon_vst3q_v:
      case NEON::BI__builtin_neon_vst3_lane_v:
      case NEON::BI__builtin_neon_vst3q_lane_v:
      case NEON::BI__builtin_neon_vst4_v:
      case NEON::BI__builtin_neon_vst4q_v:
      case NEON::BI__builtin_neon_vst4_lane_v:
      case NEON::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(PtrOp0.getPointer());
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld2_v:
      case NEON::BI__builtin_neon_vld2q_v:
      case NEON::BI__builtin_neon_vld3_v:
      case NEON::BI__builtin_neon_vld3q_v:
      case NEON::BI__builtin_neon_vld4_v:
      case NEON::BI__builtin_neon_vld4q_v:
      case NEON::BI__builtin_neon_vld2_lane_v:
      case NEON::BI__builtin_neon_vld2q_lane_v:
      case NEON::BI__builtin_neon_vld3_lane_v:
      case NEON::BI__builtin_neon_vld3q_lane_v:
      case NEON::BI__builtin_neon_vld4_lane_v:
      case NEON::BI__builtin_neon_vld4q_lane_v:
      case NEON::BI__builtin_neon_vld2_dup_v:
      case NEON::BI__builtin_neon_vld2q_dup_v:
      case NEON::BI__builtin_neon_vld3_dup_v:
      case NEON::BI__builtin_neon_vld3q_dup_v:
      case NEON::BI__builtin_neon_vld4_dup_v:
      case NEON::BI__builtin_neon_vld4q_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(PtrOp1.getPointer());
        continue;
      }
    }

    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    } else {
      // If this is required to be a constant, constant fold it so that we know
      // that the generated intrinsic gets a ConstantInt.
      Ops.push_back(llvm::ConstantInt::get(
          getLLVMContext(),
          *E->getArg(i)->getIntegerConstantExpr(getContext())));
    }
  }
  switch (BuiltinID) {
  default: break;

  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vget_lane_bf16:
  case NEON::BI__builtin_neon_vget_lane_f32:
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vgetq_lane_bf16:
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vduph_lane_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_bf16:
    return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");

  case NEON::BI__builtin_neon_vrndns_f32: {
    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Tys[] = {Arg->getType()};
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
    return Builder.CreateCall(F, {Arg}, "vrndn"); }

  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_bf16:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_bf16:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");

  case NEON::BI__builtin_neon_vsha1h_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1cq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1pq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1mq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1h");

  case NEON::BI__builtin_neon_vcvth_bf16_f32: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
                        "vcvtbfp2bf");
  }

  // The ARM _MoveToCoprocessor builtins put the input register value as
  // the first argument, but the LLVM intrinsic expects it as the third one.
  case clang::ARM::BI_MoveToCoprocessor:
  case clang::ARM::BI_MoveToCoprocessor2: {
    Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
                                       ? Intrinsic::arm_mcr
                                       : Intrinsic::arm_mcr2);
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
                                  Ops[3], Ops[4], Ops[5]});
  }
  }
  // Get the last argument, which specifies the vector type.
  assert(HasExtraArg);
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  std::optional<llvm::APSInt> Result =
      Arg->getIntegerConstantExpr(getContext());
  if (!Result)
    return nullptr;

  if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result->getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type = Result->getZExtValue();
  bool usgn = Type.isUnsigned();
  bool rightShift = false;

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
                  getTarget().hasBFloat16Type());
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Many NEON builtins have identical semantics and uses in ARM and
  // AArch64. Emit these in a single function.
  auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
  const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
      IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
      llvm::Type *Tys[] = {Ty, Int8PtrTy};
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
      Value *Align = getAlignmentValue32(PtrOp0);
      Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
      // Combine them.
      int Indices[] = {1 - Lane, Lane};
      return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    Value *Ld = Builder.CreateLoad(PtrOp0);
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int =
        usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = getAlignmentValue32(PtrOp0);
      llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Tys), Ops);
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    return Builder.CreateStore(Ops[1],
                               PtrOp0.withElementType(Ops[1]->getType()));
  }
  case NEON::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case NEON::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case NEON::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case NEON::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case NEON::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case NEON::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case NEON::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case NEON::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  }
}
template<typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
  return E->getIntegerConstantExpr(Context)->getExtValue();
}

static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
                                     llvm::Type *T, bool Unsigned) {
  // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
  // which finds it convenient to specify signed/unsigned as a boolean flag.
  return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
}
static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
                                    uint32_t Shift, bool Unsigned) {
  // MVE helper function for integer shift right. This must handle signed vs
  // unsigned, and also deal specially with the case where the shift count is
  // equal to the lane size. In LLVM IR, an LShr with that parameter would be
  // undefined behavior, but in MVE it's legal, so we must convert it to code
  // that is not undefined in IR.
  unsigned LaneBits = cast<llvm::VectorType>(V->getType())
                          ->getElementType()
                          ->getPrimitiveSizeInBits();
  if (Shift == LaneBits) {
    // An unsigned shift of the full lane size always generates zero, so we can
    // simply emit a zero vector. A signed shift of the full lane size does the
    // same thing as shifting by one bit fewer.
    if (Unsigned)
      return llvm::Constant::getNullValue(V->getType());
    else
      --Shift;
  }
  return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
}
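// Example (added for exposition): an MVE "shift right by 32" on 32-bit lanes
// cannot be emitted as an IR lshr/ashr by 32 (that would be poison); instead
// an unsigned shift becomes a zero vector and a signed shift is emitted as an
// arithmetic shift by 31.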
static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
  // MVE-specific helper function for a vector splat, which infers the element
  // count of the output vector by knowing that MVE vectors are all 128 bits
  // wide.
  unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
  return Builder.CreateVectorSplat(Elements, V);
}
static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
                                            CodeGenFunction *CGF,
                                            llvm::Value *V,
                                            llvm::Type *DestType) {
  // Convert one MVE vector type into another by reinterpreting its in-register
  // format.
  //
  // Little-endian, this is identical to a bitcast (which reinterprets the
  // memory format). But big-endian, they're not necessarily the same, because
  // the register and memory formats map to each other differently depending on
  // the lane size.
  //
  // We generate a bitcast whenever we can (if we're little-endian, or if the
  // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
  // that performs the different kind of reinterpretation.
  if (CGF->getTarget().isBigEndian() &&
      V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
    return Builder.CreateCall(
        CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
                              {DestType, V->getType()}),
        V);
  } else {
    return Builder.CreateBitCast(V, DestType);
  }
}
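// Example (added for exposition): on a big-endian target, reinterpreting a
// vector of 16-bit lanes as 32-bit lanes goes through the MVE vreinterpretq
// intrinsic rather than a plain bitcast, because the in-register and in-memory
// lane orders differ once the lane size changes.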
static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
  // Make a shufflevector that extracts every other element of a vector (evens
  // or odds, as desired).
  SmallVector<int, 16> Indices;
  unsigned InputElements =
      cast<llvm::FixedVectorType>(V->getType())->getNumElements();
  for (unsigned i = 0; i < InputElements; i += 2)
    Indices.push_back(i + Odd);
  return Builder.CreateShuffleVector(V, Indices);
}
static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
                              llvm::Value *V1) {
  // Make a shufflevector that interleaves two vectors element by element.
  assert(V0->getType() == V1->getType() && "Can't zip different vector types");
  SmallVector<int, 16> Indices;
  unsigned InputElements =
      cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
  for (unsigned i = 0; i < InputElements; i++) {
    Indices.push_back(i);
    Indices.push_back(i + InputElements);
  }
  return Builder.CreateShuffleVector(V0, V1, Indices);
}
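// Example (added for exposition): zipping <a0,a1,a2,a3> with <b0,b1,b2,b3>
// uses the shuffle mask {0,4,1,5,2,6,3,7} and yields <a0,b0,a1,b1,a2,b2,a3,b3>.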
template<unsigned HighBit, unsigned OtherBits>
static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
  // MVE-specific helper function to make a vector splat of a constant such as
  // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
  llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
  unsigned LaneBits = T->getPrimitiveSizeInBits();
  uint32_t Value = HighBit << (LaneBits - 1);
  if (OtherBits)
    Value |= (1UL << (LaneBits - 1)) - 1;
  llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
  return ARMMVEVectorSplat(Builder, Lane);
}
static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
                                               llvm::Value *V,
                                               unsigned ReverseWidth) {
  // MVE-specific helper function which reverses the elements of a
  // vector within every (ReverseWidth)-bit collection of lanes.
  SmallVector<int, 16> Indices;
  unsigned LaneSize = V->getType()->getScalarSizeInBits();
  unsigned Elements = 128 / LaneSize;
  unsigned Mask = ReverseWidth / LaneSize - 1;
  for (unsigned i = 0; i < Elements; i++)
    Indices.push_back(i ^ Mask);
  return Builder.CreateShuffleVector(V, Indices);
}
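// Example (added for exposition): reversing 8-bit lanes within each 32-bit
// group of a 128-bit vector uses Mask = 32/8 - 1 = 3, producing the shuffle
// indices {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.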
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::Triple::ArchType Arch) {
  enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
  Intrinsic::ID IRIntr;
  unsigned NumVectors;

  // Code autogenerated by Tablegen will handle all the simple builtins.
  switch (BuiltinID) {
    #include "clang/Basic/arm_mve_builtin_cg.inc"

    // If we didn't match an MVE builtin id at all, go back to the
    // main EmitARMBuiltinExpr.
  default:
    return nullptr;
  }

  // Anything that breaks from that switch is an MVE builtin that
  // needs handwritten code to generate.

  switch (CustomCodeGenType) {

  case CustomCodeGen::VLD24: {
    llvm::SmallVector<Value *, 4> Ops;
    llvm::SmallVector<llvm::Type *, 4> Tys;

    auto MvecCType = E->getType();
    auto MvecLType = ConvertType(MvecCType);
    assert(MvecLType->isStructTy() &&
           "Return type for vld[24]q should be a struct");
    assert(MvecLType->getStructNumElements() == 1 &&
           "Return-type struct for vld[24]q should have one element");
    auto MvecLTypeInner = MvecLType->getStructElementType(0);
    assert(MvecLTypeInner->isArrayTy() &&
           "Return-type struct for vld[24]q should contain an array");
    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
           "Array member of return-type struct vld[24]q has wrong length");
    auto VecLType = MvecLTypeInner->getArrayElementType();

    Tys.push_back(VecLType);

    auto Addr = E->getArg(0);
    Ops.push_back(EmitScalarExpr(Addr));
    Tys.push_back(ConvertType(Addr->getType()));

    Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
    Value *LoadResult = Builder.CreateCall(F, Ops);
    Value *MvecOut = PoisonValue::get(MvecLType);
    for (unsigned i = 0; i < NumVectors; ++i) {
      Value *Vec = Builder.CreateExtractValue(LoadResult, i);
      MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
    }

    if (ReturnValue.isNull())
      return MvecOut;
    else
      return Builder.CreateStore(MvecOut, ReturnValue.getValue());
  }

  case CustomCodeGen::VST24: {
    llvm::SmallVector<Value *, 4> Ops;
    llvm::SmallVector<llvm::Type *, 4> Tys;

    auto Addr = E->getArg(0);
    Ops.push_back(EmitScalarExpr(Addr));
    Tys.push_back(ConvertType(Addr->getType()));

    auto MvecCType = E->getArg(1)->getType();
    auto MvecLType = ConvertType(MvecCType);
    assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
    assert(MvecLType->getStructNumElements() == 1 &&
           "Data-type struct for vst2q should have one element");
    auto MvecLTypeInner = MvecLType->getStructElementType(0);
    assert(MvecLTypeInner->isArrayTy() &&
           "Data-type struct for vst2q should contain an array");
    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
           "Array member of return-type struct vld[24]q has wrong length");
    auto VecLType = MvecLTypeInner->getArrayElementType();

    Tys.push_back(VecLType);

    AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
    EmitAggExpr(E->getArg(1), MvecSlot);
    auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
    for (unsigned i = 0; i < NumVectors; i++)
      Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));

    Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
    Value *ToReturn = nullptr;
    for (unsigned i = 0; i < NumVectors; i++) {
      Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
      ToReturn = Builder.CreateCall(F, Ops);
      Ops.pop_back();
    }
    return ToReturn;
  }
  }
  llvm_unreachable("unknown custom codegen type.");
}
Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::Triple::ArchType Arch) {
  switch (BuiltinID) {
  default:
    return nullptr;
#include "clang/Basic/arm_cde_builtin_cg.inc"
  }
}
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                        const CallExpr *E,
                                        SmallVectorImpl<Value *> &Ops,
                                        llvm::Triple::ArchType Arch) {
  unsigned int Int = 0;
  const char *s = nullptr;

  switch (BuiltinID) {
  default:
    return nullptr;
  case NEON::BI__builtin_neon_vtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1q_v:
  case NEON::BI__builtin_neon_vtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
  case NEON::BI__builtin_neon_vtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3q_v:
  case NEON::BI__builtin_neon_vtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4q_v:
    break;
  case NEON::BI__builtin_neon_vtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1q_v:
  case NEON::BI__builtin_neon_vtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2q_v:
  case NEON::BI__builtin_neon_vtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3q_v:
  case NEON::BI__builtin_neon_vtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4q_v:
    break;
  }

  assert(E->getNumArgs() >= 3);

  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  std::optional<llvm::APSInt> Result =
      Arg->getIntegerConstantExpr(CGF.getContext());
  if (!Result)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type = Result->getZExtValue();
  llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
  if (!Ty)
    return nullptr;

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  // AArch64 scalar builtins are not overloaded, they do not have an extra
  // argument that specifies the vector type, need to handle each case.
  switch (BuiltinID) {
  case NEON::BI__builtin_neon_vtbl1_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 1), nullptr, Ops[1],
                              Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
  }
  case NEON::BI__builtin_neon_vtbl2_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 2), nullptr, Ops[2],
                              Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
  }
  case NEON::BI__builtin_neon_vtbl3_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 3), nullptr, Ops[3],
                              Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
  }
  case NEON::BI__builtin_neon_vtbl4_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 4), nullptr, Ops[4],
                              Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
  }
  case NEON::BI__builtin_neon_vtbx1_v: {
    Value *TblRes =
        packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 1), nullptr, Ops[2], Ty,
                           Intrinsic::aarch64_neon_tbl1, "vtbl1");

    llvm::Constant *EightV = ConstantInt::get(Ty, 8);
    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
    CmpRes = Builder.CreateSExt(CmpRes, Ty);

    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  }
  case NEON::BI__builtin_neon_vtbx2_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 2), Ops[0], Ops[3],
                              Ty, Intrinsic::aarch64_neon_tbx1, "vtbx1");
  }
  case NEON::BI__builtin_neon_vtbx3_v: {
    Value *TblRes =
        packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 3), nullptr, Ops[4], Ty,
                           Intrinsic::aarch64_neon_tbl2, "vtbl2");

    llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
                                       TwentyFourV);
    CmpRes = Builder.CreateSExt(CmpRes, Ty);

    Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
    Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
    return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
  }
  case NEON::BI__builtin_neon_vtbx4_v: {
    return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 4), Ops[0], Ops[5],
                              Ty, Intrinsic::aarch64_neon_tbx2, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbl1_v:
  case NEON::BI__builtin_neon_vqtbl1q_v:
    Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
  case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
  case NEON::BI__builtin_neon_vqtbl3_v:
  case NEON::BI__builtin_neon_vqtbl3q_v:
    Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
  case NEON::BI__builtin_neon_vqtbl4_v:
  case NEON::BI__builtin_neon_vqtbl4q_v:
    Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
  case NEON::BI__builtin_neon_vqtbx1_v:
  case NEON::BI__builtin_neon_vqtbx1q_v:
    Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
  case NEON::BI__builtin_neon_vqtbx2_v:
  case NEON::BI__builtin_neon_vqtbx2q_v:
    Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
  case NEON::BI__builtin_neon_vqtbx3_v:
  case NEON::BI__builtin_neon_vqtbx3q_v:
    Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
  case NEON::BI__builtin_neon_vqtbx4_v:
  case NEON::BI__builtin_neon_vqtbx4q_v:
    Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
  }
  }

  if (!Int)
    return nullptr;

  Function *F = CGF.CGM.getIntrinsic(Int, Ty);
  return CGF.EmitNeonCall(F, Ops, s);
}
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
  auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
  Op = Builder.CreateBitCast(Op, Int16Ty);
  Value *V = PoisonValue::get(VTy);
  llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
  Op = Builder.CreateInsertElement(V, Op, CI);
  return Op;
}
/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin.  Only required if it can't be inferred from the base pointer
/// operand.
llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
  switch (TypeFlags.getMemEltType()) {
  case SVETypeFlags::MemEltTyDefault:
    return getEltType(TypeFlags);
  case SVETypeFlags::MemEltTyInt8:
    return Builder.getInt8Ty();
  case SVETypeFlags::MemEltTyInt16:
    return Builder.getInt16Ty();
  case SVETypeFlags::MemEltTyInt32:
    return Builder.getInt32Ty();
  case SVETypeFlags::MemEltTyInt64:
    return Builder.getInt64Ty();
  }
  llvm_unreachable("Unknown MemEltType");
}
llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default:
    llvm_unreachable("Invalid SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return Builder.getInt8Ty();
  case SVETypeFlags::EltTyInt16:
    return Builder.getInt16Ty();
  case SVETypeFlags::EltTyInt32:
    return Builder.getInt32Ty();
  case SVETypeFlags::EltTyInt64:
    return Builder.getInt64Ty();
  case SVETypeFlags::EltTyInt128:
    return Builder.getInt128Ty();

  case SVETypeFlags::EltTyFloat16:
    return Builder.getHalfTy();
  case SVETypeFlags::EltTyFloat32:
    return Builder.getFloatTy();
  case SVETypeFlags::EltTyFloat64:
    return Builder.getDoubleTy();

  case SVETypeFlags::EltTyBFloat16:
    return Builder.getBFloatTy();

  case SVETypeFlags::EltTyBool8:
  case SVETypeFlags::EltTyBool16:
  case SVETypeFlags::EltTyBool32:
  case SVETypeFlags::EltTyBool64:
    return Builder.getInt1Ty();
  }
}
// Return the llvm predicate vector type corresponding to the specified element
// TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default: llvm_unreachable("Unhandled SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyInt16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyInt32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyInt64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);

  case SVETypeFlags::EltTyBFloat16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyFloat16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyFloat32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyFloat64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);

  case SVETypeFlags::EltTyBool8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyBool16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyBool32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyBool64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  }
}
// Return the llvm vector type corresponding to the specified element TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
  switch (TypeFlags.getEltType()) {
  default:
    llvm_unreachable("Invalid SVETypeFlag!");

  case SVETypeFlags::EltTyInt8:
    return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
  case SVETypeFlags::EltTyInt16:
    return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
  case SVETypeFlags::EltTyInt32:
    return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
  case SVETypeFlags::EltTyInt64:
    return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);

  case SVETypeFlags::EltTyFloat16:
    return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
  case SVETypeFlags::EltTyBFloat16:
    return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
  case SVETypeFlags::EltTyFloat32:
    return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
  case SVETypeFlags::EltTyFloat64:
    return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);

  case SVETypeFlags::EltTyBool8:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
  case SVETypeFlags::EltTyBool16:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
  case SVETypeFlags::EltTyBool32:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
  case SVETypeFlags::EltTyBool64:
    return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
  }
}
Value *CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
  Function *Ptrue =
      CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
  return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
}
constexpr unsigned SVEBitsPerBlock = 128;

static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
  unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
  return llvm::ScalableVectorType::get(EltTy, NumElts);
}
// Reinterpret the input predicate so that it can be used to correctly isolate
// the elements of the specified datatype.
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
                                             llvm::ScalableVectorType *VTy) {

  if (isa<TargetExtType>(Pred->getType()) &&
      cast<TargetExtType>(Pred->getType())->getName() == "aarch64.svcount")
    return Pred;

  auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
  if (Pred->getType() == RTy)
    return Pred;

  unsigned IntID;
  llvm::Type *IntrinsicTy;
  switch (VTy->getMinNumElements()) {
  default:
    llvm_unreachable("unsupported element count!");
  case 1:
  case 2:
  case 4:
  case 8:
    IntID = Intrinsic::aarch64_sve_convert_from_svbool;
    IntrinsicTy = RTy;
    break;
  case 16:
    IntID = Intrinsic::aarch64_sve_convert_to_svbool;
    IntrinsicTy = Pred->getType();
    break;
  }

  Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
  Value *C = Builder.CreateCall(F, Pred);
  assert(C->getType() == RTy && "Unexpected return type!");
  return C;
}
Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned IntID) {
  auto *ResultTy = getSVEType(TypeFlags);
  auto *OverloadedTy =
      llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);

  // At the ACLE level there's only one predicate type, svbool_t, which is
  // mapped to <n x 16 x i1>. However, this might be incompatible with the
  // actual type being loaded. For example, when loading doubles (i64) the
  // predicate should be <n x 2 x i1> instead. At the IR level the type of
  // the predicate and the data being loaded must match. Cast accordingly.
  Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);

  Function *F = nullptr;
  if (Ops[1]->getType()->isVectorTy())
    // This is the "vector base, scalar offset" case. In order to uniquely
    // map this built-in to an LLVM IR intrinsic, we need both the return type
    // and the type of the vector base.
    F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
  else
    // This is the "scalar base, vector offset case". The type of the offset
    // is encoded in the name of the intrinsic. We only need to specify the
    // return type in order to uniquely map this built-in to an LLVM IR
    // intrinsic.
    F = CGM.getIntrinsic(IntID, OverloadedTy);

  // Pass 0 when the offset is missing. This can only be applied when using
  // the "vector base" addressing mode for which ACLE allows no offset. The
  // corresponding LLVM IR always requires an offset.
  if (Ops.size() == 2) {
    assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
    Ops.push_back(ConstantInt::get(Int64Ty, 0));
  }

  // For "vector base, scalar index" scale the index so that it becomes a
  // scalar offset.
  if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
    unsigned BytesPerElt =
        OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
    Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
  }

  Value *Call = Builder.CreateCall(F, Ops);

  // The following sext/zext is only needed when ResultTy != OverloadedTy. In
  // other cases it's folded into a nop.
  return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
                                  : Builder.CreateSExt(Call, ResultTy);
}
Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned IntID) {
  auto *SrcDataTy = getSVEType(TypeFlags);
  auto *OverloadedTy =
      llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);

  // In ACLE the source data is passed in the last argument, whereas in LLVM IR
  // it's the first argument. Move it accordingly.
  Ops.insert(Ops.begin(), Ops.pop_back_val());

  Function *F = nullptr;
  if (Ops[2]->getType()->isVectorTy())
    // This is the "vector base, scalar offset" case. In order to uniquely
    // map this built-in to an LLVM IR intrinsic, we need both the return type
    // and the type of the vector base.
    F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
  else
    // This is the "scalar base, vector offset case". The type of the offset
    // is encoded in the name of the intrinsic. We only need to specify the
    // return type in order to uniquely map this built-in to an LLVM IR
    // intrinsic.
    F = CGM.getIntrinsic(IntID, OverloadedTy);

  // Pass 0 when the offset is missing. This can only be applied when using
  // the "vector base" addressing mode for which ACLE allows no offset. The
  // corresponding LLVM IR always requires an offset.
  if (Ops.size() == 3) {
    assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
    Ops.push_back(ConstantInt::get(Int64Ty, 0));
  }

  // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
  // folded into a nop.
  Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);

  // At the ACLE level there's only one predicate type, svbool_t, which is
  // mapped to <n x 16 x i1>. However, this might be incompatible with the
  // actual type being stored. For example, when storing doubles (i64) the
  // predicate should be <n x 2 x i1> instead. At the IR level the type of
  // the predicate and the data being stored must match. Cast accordingly.
  Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);

  // For "vector base, scalar index" scale the index so that it becomes a
  // scalar offset.
  if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
    unsigned BytesPerElt =
        OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
    Ops[3] = Builder.CreateShl(Ops[3], Log2_32(BytesPerElt));
  }

  return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
                                              SmallVectorImpl<Value *> &Ops,
                                              unsigned IntID) {
  // The gather prefetches are overloaded on the vector input - this can either
  // be the vector of base addresses or vector of offsets.
  auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
  if (!OverloadedTy)
    OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());

  // Cast the predicate from svbool_t to the right number of elements.
  Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);

  // vector + imm addressing modes
  if (Ops[1]->getType()->isVectorTy()) {
    if (Ops.size() == 3) {
      // Pass 0 for 'vector+imm' when the index is omitted.
      Ops.push_back(ConstantInt::get(Int64Ty, 0));

      // The sv_prfop is the last operand in the builtin and IR intrinsic.
      std::swap(Ops[2], Ops[3]);
    } else {
      // Index needs to be passed as scaled offset.
      llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
      unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
      if (BytesPerElt > 1)
        Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
    }
  }

  Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
  return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned IntID) {
  llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);

  unsigned N;
  switch (IntID) {
  case Intrinsic::aarch64_sve_ld2_sret:
  case Intrinsic::aarch64_sve_ld1_pn_x2:
  case Intrinsic::aarch64_sve_ldnt1_pn_x2:
    N = 2;
    break;
  case Intrinsic::aarch64_sve_ld3_sret:
    N = 3;
    break;
  case Intrinsic::aarch64_sve_ld4_sret:
  case Intrinsic::aarch64_sve_ld1_pn_x4:
  case Intrinsic::aarch64_sve_ldnt1_pn_x4:
    N = 4;
    break;
  default:
    llvm_unreachable("unknown intrinsic!");
  }
  auto RetTy = llvm::VectorType::get(VTy->getElementType(),
                                     VTy->getElementCount() * N);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
  Value *BasePtr = Ops[1];

  // Does the load have an offset?
  if (Ops.size() > 2)
    BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);

  Function *F = CGM.getIntrinsic(IntID, {VTy});
  Value *Call = Builder.CreateCall(F, {Predicate, BasePtr});
  unsigned MinElts = VTy->getMinNumElements();
  Value *Ret = llvm::PoisonValue::get(RetTy);
  for (unsigned I = 0; I < N; I++) {
    Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
    Value *SRet = Builder.CreateExtractValue(Call, I);
    Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
  }
  return Ret;
}
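// For instance (illustrative only): an svld2_s32 call yields a two-element
// struct of <vscale x 4 x i32> parts from the ld2 intrinsic; the loop above
// concatenates those parts into a single <vscale x 8 x i32> result by
// inserting them at element indices 0 and 4 (scaled by vscale).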
Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
                                           SmallVectorImpl<Value *> &Ops,
                                           unsigned IntID) {
  llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);

  unsigned N;
  switch (IntID) {
  case Intrinsic::aarch64_sve_st2:
  case Intrinsic::aarch64_sve_st1_pn_x2:
  case Intrinsic::aarch64_sve_stnt1_pn_x2:
    N = 2;
    break;
  case Intrinsic::aarch64_sve_st3:
    N = 3;
    break;
  case Intrinsic::aarch64_sve_st4:
  case Intrinsic::aarch64_sve_st1_pn_x4:
  case Intrinsic::aarch64_sve_stnt1_pn_x4:
    N = 4;
    break;
  default:
    llvm_unreachable("unknown intrinsic!");
  }

  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
  Value *BasePtr = Ops[1];

  // Does the store have an offset?
  if (Ops.size() > 3)
    BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);

  Value *Val = Ops.back();

  // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
  // need to break up the tuple vector.
  SmallVector<llvm::Value *, 5> Operands;
  unsigned MinElts = VTy->getElementCount().getKnownMinValue();
  for (unsigned I = 0; I < N; ++I) {
    Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
    Operands.push_back(Builder.CreateExtractVector(VTy, Val, Idx));
  }
  Operands.append({Predicate, BasePtr});

  Function *F = CGM.getIntrinsic(IntID, { VTy });
  return Builder.CreateCall(F, Operands);
}
// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
// svpmullt_pair intrinsics, with the exception that their results are bitcast
// to a wider type.
Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
                                     SmallVectorImpl<Value *> &Ops,
                                     unsigned BuiltinID) {
  // Splat scalar operand to vector (intrinsics with _n infix)
  if (TypeFlags.hasSplatOperand()) {
    unsigned OpNo = TypeFlags.getSplatOperand();
    Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
  }

  // The pair-wise function has a narrower overloaded type.
  Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
  Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});

  // Now bitcast to the wider result type.
  llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
  return EmitSVEReinterpret(Call, Ty);
}
Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
                                    ArrayRef<Value *> Ops, unsigned BuiltinID) {
  llvm::Type *OverloadedTy = getSVEType(TypeFlags);
  Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
  return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
}
Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned BuiltinID) {
  auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
  auto *VectorTy = getSVEVectorForElementType(MemEltTy);
  auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Ops[1];

  // Implement the index operand if not omitted.
  if (Ops.size() > 3)
    BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);

  Value *PrfOp = Ops.back();

  Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
  return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
}
Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
                                          llvm::Type *ReturnTy,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned BuiltinID,
                                          bool IsZExtReturn) {
  QualType LangPTy = E->getArg(1)->getType();
  llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
      LangPTy->castAs<PointerType>()->getPointeeType());

  // The vector type that is returned may be different from the
  // eventual type loaded from memory.
  auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
  auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Ops[1];

  // Does the load have an offset?
  if (Ops.size() > 2)
    BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);

  Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
  auto *Load =
      cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
  auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);

  return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
                      : Builder.CreateSExt(Load, VectorTy);
}
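// A concrete (illustrative) case: svld1ub_u32 loads <vscale x 4 x i8> through
// the masked-load intrinsic overloaded on the memory type, then zero-extends
// the result to the <vscale x 4 x i32> return type; when the memory and return
// element types already match, the trailing sext/zext folds away.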
Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
                                           SmallVectorImpl<Value *> &Ops,
                                           unsigned BuiltinID) {
  QualType LangPTy = E->getArg(1)->getType();
  llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
      LangPTy->castAs<PointerType>()->getPointeeType());

  // The vector type that is stored may be different from the
  // eventual type stored to memory.
  auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
  auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);

  Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
  Value *BasePtr = Ops[1];

  // Does the store have an offset?
  if (Ops.size() == 4)
    BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);

  // Last value is always the data
  llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);

  Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
  auto *Store =
      cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
  auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
  return Store;
}
Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
                                      SmallVectorImpl<Value *> &Ops,
                                      unsigned IntID) {
  Ops[2] = EmitSVEPredicateCast(
      Ops[2], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));

  SmallVector<Value *> NewOps;
  NewOps.push_back(Ops[2]);

  llvm::Value *BasePtr = Ops[3];

  // If the intrinsic contains the vnum parameter, multiply it with the vector
  // size in bytes.
  if (Ops.size() == 5) {
    Function *StreamingVectorLength =
        CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
    llvm::Value *StreamingVectorLengthCall =
        Builder.CreateCall(StreamingVectorLength);
    llvm::Value *Mulvl =
        Builder.CreateMul(StreamingVectorLengthCall, Ops[4], "mulvl");
    // The type of the ptr parameter is void *, so use Int8Ty here.
    BasePtr = Builder.CreateGEP(Int8Ty, Ops[3], Mulvl);
  }
  NewOps.push_back(BasePtr);
  NewOps.push_back(Ops[0]);
  NewOps.push_back(Ops[1]);
  Function *F = CGM.getIntrinsic(IntID);
  return Builder.CreateCall(F, NewOps);
}
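// Sketch of the vnum handling above (illustrative): for a _vnum builtin the
// effective base pointer is computed as roughly
//   %svl.b = call i64 @llvm.aarch64.sme.cntsb()
//   %mulvl = mul i64 %svl.b, %vnum
//   %base  = getelementptr i8, ptr %ptr, i64 %mulvl
// i.e. the pointer is advanced by vnum multiples of the streaming vector
// length in bytes.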
Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
                                         SmallVectorImpl<Value *> &Ops,
                                         unsigned IntID) {
  auto *VecTy = getSVEType(TypeFlags);
  Function *F = CGM.getIntrinsic(IntID, VecTy);
  if (TypeFlags.isReadZA())
    Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
  else if (TypeFlags.isWriteZA())
    Ops[2] = EmitSVEPredicateCast(Ops[2], VecTy);
  return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
                                    SmallVectorImpl<Value *> &Ops,
                                    unsigned IntID) {
  // svzero_za() intrinsic zeros the entire za tile and has no parameters.
  if (Ops.size() == 0)
    Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
  Function *F = CGM.getIntrinsic(IntID, {});
  return Builder.CreateCall(F, Ops);
}
Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
                                      SmallVectorImpl<Value *> &Ops,
                                      unsigned IntID) {
  if (Ops.size() == 3) {
    Function *Cntsb = CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
    llvm::Value *CntsbCall = Builder.CreateCall(Cntsb, {}, "svlb");

    llvm::Value *VecNum = Ops[2];
    llvm::Value *MulVL = Builder.CreateMul(CntsbCall, VecNum, "mulvl");

    Ops[1] = Builder.CreateGEP(Int8Ty, Ops[1], MulVL);
    Ops[0] = Builder.CreateAdd(
        Ops[0], Builder.CreateIntCast(VecNum, Int32Ty, true), "tileslice");
    Ops.erase(&Ops[2]);
  }
  Function *F = CGM.getIntrinsic(IntID, {});
  return Builder.CreateCall(F, Ops);
}
// Limit the usage of scalable llvm IR generated by the ACLE by using the
// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
  return Builder.CreateVectorSplat(
      cast<llvm::VectorType>(Ty)->getElementCount(), Scalar);
}

Value *CodeGenFunction::EmitSVEDupX(Value *Scalar) {
  return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
}

Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
  // FIXME: For big endian this needs an additional REV, or needs a separate
  // intrinsic that is code-generated as a no-op, because the LLVM bitcast
  // instruction is defined as 'bitwise' equivalent from memory point of
  // view (when storing/reloading), whereas the svreinterpret builtin
  // implements bitwise equivalent cast from register point of view.
  // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
  return Builder.CreateBitCast(Val, Ty);
}
static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                      SmallVectorImpl<Value *> &Ops) {
  auto *SplatZero = Constant::getNullValue(Ty);
  Ops.insert(Ops.begin(), SplatZero);
}

static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                       SmallVectorImpl<Value *> &Ops) {
  auto *SplatUndef = UndefValue::get(Ty);
  Ops.insert(Ops.begin(), SplatUndef);
}
SmallVector<llvm::Type *, 2>
CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
                                     llvm::Type *ResultType,
                                     ArrayRef<Value *> Ops) {
  if (TypeFlags.isOverloadNone())
    return {};

  llvm::Type *DefaultType = getSVEType(TypeFlags);

  if (TypeFlags.isOverloadWhile())
    return {DefaultType, Ops[1]->getType()};

  if (TypeFlags.isOverloadWhileRW())
    return {getSVEPredType(TypeFlags), Ops[0]->getType()};

  if (TypeFlags.isOverloadCvt())
    return {Ops[0]->getType(), Ops.back()->getType()};

  assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
  return {DefaultType};
}
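// For example (illustrative): an svwhilelt_b32 builtin is an "overload while"
// case, so its intrinsic is overloaded on both the result predicate type and
// the scalar operand type (e.g. @llvm.aarch64.sve.whilelt.nxv4i1.i32), while a
// plain arithmetic builtin only needs its default SVE data type.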
Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
                                             llvm::Type *Ty,
                                             ArrayRef<Value *> Ops) {
  assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) &&
         "Expects TypeFlags.isTupleSet() or TypeFlags.isTupleGet()");

  unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
  auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>(
      TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
  Value *Idx = ConstantInt::get(CGM.Int64Ty,
                                I * SingleVecTy->getMinNumElements());

  if (TypeFlags.isTupleSet())
    return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx);
  return Builder.CreateExtractVector(Ty, Ops[0], Idx);
}
Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
                                           llvm::Type *Ty,
                                           ArrayRef<Value *> Ops) {
  assert(TypeFlags.isTupleCreate() && "Expects TypeFlags.isTupleCreate()");

  auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
  unsigned MinElts = SrcTy->getMinNumElements();
  Value *Call = llvm::PoisonValue::get(Ty);
  for (unsigned I = 0; I < Ops.size(); I++) {
    Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
    Call = Builder.CreateInsertVector(Ty, Call, Ops[I], Idx);
  }

  return Call;
}
Value *CodeGenFunction::FormSVEBuiltinResult(Value *Call) {
  // Multi-vector results should be broken up into a single (wide) result
  // vector.
  auto *StructTy = dyn_cast<StructType>(Call->getType());
  if (!StructTy)
    return Call;

  auto *VTy = dyn_cast<ScalableVectorType>(StructTy->getTypeAtIndex(0U));
  if (!VTy)
    return Call;
  unsigned N = StructTy->getNumElements();

  // We may need to emit a cast to a svbool_t
  bool IsPredTy = VTy->getElementType()->isIntegerTy(1);
  unsigned MinElts = IsPredTy ? 16 : VTy->getMinNumElements();

  ScalableVectorType *WideVTy =
      ScalableVectorType::get(VTy->getElementType(), MinElts * N);
  Value *Ret = llvm::PoisonValue::get(WideVTy);
  for (unsigned I = 0; I < N; ++I) {
    Value *SRet = Builder.CreateExtractValue(Call, I);
    assert(SRet->getType() == VTy && "Unexpected type for result value");
    Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);

    if (IsPredTy)
      SRet = EmitSVEPredicateCast(
          SRet, ScalableVectorType::get(Builder.getInt1Ty(), 16));

    Ret = Builder.CreateInsertVector(WideVTy, Ret, SRet, Idx);
  }
  return Ret;
}
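// Illustrative example: a {<vscale x 4 x i32>, <vscale x 4 x i32>} struct
// returned by an ld1_pn_x2-style intrinsic is repackaged here into a single
// <vscale x 8 x i32> value; i1 parts are first widened to the 16-lane
// svbool_t form before being inserted.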
Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
                                                  const CallExpr *E) {
  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  llvm::Type *Ty = ConvertType(E->getType());
  if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
      BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
    Value *Val = EmitScalarExpr(E->getArg(0));
    return EmitSVEReinterpret(Val, Ty);
  }

  llvm::SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    if ((ICEArguments & (1 << i)) == 0)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    else {
      // If this is required to be a constant, constant fold it so that we know
      // that the generated intrinsic gets a ConstantInt.
      std::optional<llvm::APSInt> Result =
          E->getArg(i)->getIntegerConstantExpr(getContext());
      assert(Result && "Expected argument to be a constant");

      // Immediates for SVE llvm intrinsics are always 32bit. We can safely
      // truncate because the immediate has been range checked and no valid
      // immediate requires more than a handful of bits.
      *Result = Result->extOrTrunc(32);
      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
    }
  }

  auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
                                              AArch64SVEIntrinsicsProvenSorted);
  SVETypeFlags TypeFlags(Builtin->TypeModifier);
  if (TypeFlags.isLoad())
    return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
                             TypeFlags.isZExtReturn());
  else if (TypeFlags.isStore())
    return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isGatherLoad())
    return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isScatterStore())
    return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isPrefetch())
    return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isGatherPrefetch())
    return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructLoad())
    return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructStore())
    return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
    return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops);
  else if (TypeFlags.isTupleCreate())
    return EmitSVETupleCreate(TypeFlags, Ty, Ops);
  else if (TypeFlags.isUndef())
    return UndefValue::get(Ty);
  else if (Builtin->LLVMIntrinsic != 0) {
    if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
      InsertExplicitZeroOperand(Builder, Ty, Ops);

    if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
      InsertExplicitUndefOperand(Builder, Ty, Ops);

    // Some ACLE builtins leave out the argument to specify the predicate
    // pattern, which is expected to be expanded to an SV_ALL pattern.
    if (TypeFlags.isAppendSVALL())
      Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
    if (TypeFlags.isInsertOp1SVALL())
      Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));

    // Predicates must match the main datatype.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
        if (PredTy->getElementType()->isIntegerTy(1))
          Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));

    // Splat scalar operand to vector (intrinsics with _n infix)
    if (TypeFlags.hasSplatOperand()) {
      unsigned OpNo = TypeFlags.getSplatOperand();
      Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
    }

    if (TypeFlags.isReverseCompare())
      std::swap(Ops[1], Ops[2]);
    else if (TypeFlags.isReverseUSDOT())
      std::swap(Ops[1], Ops[2]);
    else if (TypeFlags.isReverseMergeAnyBinOp() &&
             TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
      std::swap(Ops[1], Ops[2]);
    else if (TypeFlags.isReverseMergeAnyAccOp() &&
             TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
      std::swap(Ops[1], Ops[3]);

    // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
    if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
      llvm::Type *OpndTy = Ops[1]->getType();
      auto *SplatZero = Constant::getNullValue(OpndTy);
      Ops[1] = Builder.CreateSelect(Ops[0], Ops[1], SplatZero);
    }

    Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
                                   getSVEOverloadTypes(TypeFlags, Ty, Ops));
    Value *Call = Builder.CreateCall(F, Ops);

    // Predicate results must be converted to svbool_t.
    if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
      if (PredTy->getScalarType()->isIntegerTy(1))
        Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));

    return FormSVEBuiltinResult(Call);
  }
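  // As a sketch of the generic path above (illustrative only): for
  // svadd_s32_z(pg, a, b) the svbool_t predicate is narrowed to
  // <vscale x 4 x i1>, operand 1 is replaced by a select of a against zero
  // under that predicate (MergeZero), and the call is emitted as roughly
  //   %r = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(
  //            <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a.z,
  //            <vscale x 4 x i32> %b)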
  switch (BuiltinID) {
  default:
    return nullptr;

  case SVE::BI__builtin_sve_svpsel_lane_b8:
  case SVE::BI__builtin_sve_svpsel_lane_b16:
  case SVE::BI__builtin_sve_svpsel_lane_b32:
  case SVE::BI__builtin_sve_svpsel_lane_b64:
  case SVE::BI__builtin_sve_svpsel_lane_c8:
  case SVE::BI__builtin_sve_svpsel_lane_c16:
  case SVE::BI__builtin_sve_svpsel_lane_c32:
  case SVE::BI__builtin_sve_svpsel_lane_c64: {
    bool IsSVCount = isa<TargetExtType>(Ops[0]->getType());
    assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() ==
                               "aarch64.svcount")) &&
           "Unexpected TargetExtType");
    auto SVCountTy =
        llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
    Function *CastFromSVCountF =
        CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_to_svbool, SVCountTy);
    Function *CastToSVCountF =
        CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, SVCountTy);

    auto OverloadedTy = getSVEType(SVETypeFlags(Builtin->TypeModifier));
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_psel, OverloadedTy);
    llvm::Value *Ops0 =
        IsSVCount ? Builder.CreateCall(CastFromSVCountF, Ops[0]) : Ops[0];
    llvm::Value *Ops1 = EmitSVEPredicateCast(Ops[1], OverloadedTy);
    llvm::Value *PSel = Builder.CreateCall(F, {Ops0, Ops1, Ops[2]});
    return IsSVCount ? Builder.CreateCall(CastToSVCountF, PSel) : PSel;
  }
  case SVE::BI__builtin_sve_svmov_b_z: {
    // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    llvm::Type *OverloadedTy = getSVEType(TypeFlags);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
    return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
  }

  case SVE::BI__builtin_sve_svnot_b_z: {
    // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    llvm::Type *OverloadedTy = getSVEType(TypeFlags);
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
    return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
  }

  case SVE::BI__builtin_sve_svmovlb_u16:
  case SVE::BI__builtin_sve_svmovlb_u32:
  case SVE::BI__builtin_sve_svmovlb_u64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);

  case SVE::BI__builtin_sve_svmovlb_s16:
  case SVE::BI__builtin_sve_svmovlb_s32:
  case SVE::BI__builtin_sve_svmovlb_s64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);

  case SVE::BI__builtin_sve_svmovlt_u16:
  case SVE::BI__builtin_sve_svmovlt_u32:
  case SVE::BI__builtin_sve_svmovlt_u64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);

  case SVE::BI__builtin_sve_svmovlt_s16:
  case SVE::BI__builtin_sve_svmovlt_s32:
  case SVE::BI__builtin_sve_svmovlt_s64:
    return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);

  case SVE::BI__builtin_sve_svpmullt_u16:
  case SVE::BI__builtin_sve_svpmullt_u64:
  case SVE::BI__builtin_sve_svpmullt_n_u16:
  case SVE::BI__builtin_sve_svpmullt_n_u64:
    return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);

  case SVE::BI__builtin_sve_svpmullb_u16:
  case SVE::BI__builtin_sve_svpmullb_u64:
  case SVE::BI__builtin_sve_svpmullb_n_u16:
  case SVE::BI__builtin_sve_svpmullb_n_u64:
    return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);

  case SVE::BI__builtin_sve_svdup_n_b8:
  case SVE::BI__builtin_sve_svdup_n_b16:
  case SVE::BI__builtin_sve_svdup_n_b32:
  case SVE::BI__builtin_sve_svdup_n_b64: {
    Value *CmpNE =
        Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
    llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
    Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
    return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
  }

  case SVE::BI__builtin_sve_svdupq_n_b8:
  case SVE::BI__builtin_sve_svdupq_n_b16:
  case SVE::BI__builtin_sve_svdupq_n_b32:
  case SVE::BI__builtin_sve_svdupq_n_b64:
  case SVE::BI__builtin_sve_svdupq_n_u8:
  case SVE::BI__builtin_sve_svdupq_n_s8:
  case SVE::BI__builtin_sve_svdupq_n_u64:
  case SVE::BI__builtin_sve_svdupq_n_f64:
  case SVE::BI__builtin_sve_svdupq_n_s64:
  case SVE::BI__builtin_sve_svdupq_n_u16:
  case SVE::BI__builtin_sve_svdupq_n_f16:
  case SVE::BI__builtin_sve_svdupq_n_bf16:
  case SVE::BI__builtin_sve_svdupq_n_s16:
  case SVE::BI__builtin_sve_svdupq_n_u32:
  case SVE::BI__builtin_sve_svdupq_n_f32:
  case SVE::BI__builtin_sve_svdupq_n_s32: {
    // These builtins are implemented by storing each element to an array and
    // using ld1rq to materialize a vector.
    unsigned NumOpnds = Ops.size();

    bool IsBoolTy =
        cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);

    // For svdupq_n_b* the element type is an integer of width 128/numelts,
    // so that the compare can use the width that is natural for the expected
    // number of predicate lanes.
    llvm::Type *EltTy = Ops[0]->getType();
    if (IsBoolTy)
      EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);

    SmallVector<llvm::Value *, 16> VecOps;
    for (unsigned I = 0; I < NumOpnds; ++I)
      VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
    Value *Vec = BuildVector(VecOps);

    llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
    Value *InsertSubVec = Builder.CreateInsertVector(
        OverloadedTy, PoisonValue::get(OverloadedTy), Vec, Builder.getInt64(0));

    Function *F =
        CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
    Value *DupQLane =
        Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});

    if (!IsBoolTy)
      return DupQLane;

    SVETypeFlags TypeFlags(Builtin->TypeModifier);
    Value *Pred = EmitSVEAllTruePred(TypeFlags);

    // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
    F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
                                       : Intrinsic::aarch64_sve_cmpne_wide,
                         OverloadedTy);
    Value *Call = Builder.CreateCall(
        F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
    return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
  }

  case SVE::BI__builtin_sve_svpfalse_b:
    return ConstantInt::getFalse(Ty);

  case SVE::BI__builtin_sve_svpfalse_c: {
    auto SVBoolTy = ScalableVectorType::get(Builder.getInt1Ty(), 16);
    Function *CastToSVCountF =
        CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, Ty);
    return Builder.CreateCall(CastToSVCountF, ConstantInt::getFalse(SVBoolTy));
  }

  case SVE::BI__builtin_sve_svlen_bf16:
  case SVE::BI__builtin_sve_svlen_f16:
  case SVE::BI__builtin_sve_svlen_f32:
  case SVE::BI__builtin_sve_svlen_f64:
  case SVE::BI__builtin_sve_svlen_s8:
  case SVE::BI__builtin_sve_svlen_s16:
  case SVE::BI__builtin_sve_svlen_s32:
  case SVE::BI__builtin_sve_svlen_s64:
  case SVE::BI__builtin_sve_svlen_u8:
  case SVE::BI__builtin_sve_svlen_u16:
  case SVE::BI__builtin_sve_svlen_u32:
  case SVE::BI__builtin_sve_svlen_u64: {
    SVETypeFlags TF(Builtin->TypeModifier);
    auto VTy = cast<llvm::VectorType>(getSVEType(TF));
    Value *NumEls =
        llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());

    Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
    return Builder.CreateMul(NumEls, Builder.CreateCall(F));
  }

  case SVE::BI__builtin_sve_svtbl2_u8:
  case SVE::BI__builtin_sve_svtbl2_s8:
  case SVE::BI__builtin_sve_svtbl2_u16:
  case SVE::BI__builtin_sve_svtbl2_s16:
  case SVE::BI__builtin_sve_svtbl2_u32:
  case SVE::BI__builtin_sve_svtbl2_s32:
  case SVE::BI__builtin_sve_svtbl2_u64:
  case SVE::BI__builtin_sve_svtbl2_s64:
  case SVE::BI__builtin_sve_svtbl2_f16:
  case SVE::BI__builtin_sve_svtbl2_bf16:
  case SVE::BI__builtin_sve_svtbl2_f32:
  case SVE::BI__builtin_sve_svtbl2_f64: {
    SVETypeFlags TF(Builtin->TypeModifier);
    auto VTy = cast<llvm::ScalableVectorType>(getSVEType(TF));
    Value *V0 = Builder.CreateExtractVector(VTy, Ops[0],
                                            ConstantInt::get(CGM.Int64Ty, 0));
    unsigned MinElts = VTy->getMinNumElements();
    Value *V1 = Builder.CreateExtractVector(
        VTy, Ops[0], ConstantInt::get(CGM.Int64Ty, MinElts));
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
    return Builder.CreateCall(F, {V0, V1, Ops[1]});
  }

  case SVE::BI__builtin_sve_svset_neonq_s8:
  case SVE::BI__builtin_sve_svset_neonq_s16:
  case SVE::BI__builtin_sve_svset_neonq_s32:
  case SVE::BI__builtin_sve_svset_neonq_s64:
  case SVE::BI__builtin_sve_svset_neonq_u8:
  case SVE::BI__builtin_sve_svset_neonq_u16:
  case SVE::BI__builtin_sve_svset_neonq_u32:
  case SVE::BI__builtin_sve_svset_neonq_u64:
  case SVE::BI__builtin_sve_svset_neonq_f16:
  case SVE::BI__builtin_sve_svset_neonq_f32:
  case SVE::BI__builtin_sve_svset_neonq_f64:
  case SVE::BI__builtin_sve_svset_neonq_bf16: {
    return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
  }

  case SVE::BI__builtin_sve_svget_neonq_s8:
  case SVE::BI__builtin_sve_svget_neonq_s16:
  case SVE::BI__builtin_sve_svget_neonq_s32:
  case SVE::BI__builtin_sve_svget_neonq_s64:
  case SVE::BI__builtin_sve_svget_neonq_u8:
  case SVE::BI__builtin_sve_svget_neonq_u16:
  case SVE::BI__builtin_sve_svget_neonq_u32:
  case SVE::BI__builtin_sve_svget_neonq_u64:
  case SVE::BI__builtin_sve_svget_neonq_f16:
  case SVE::BI__builtin_sve_svget_neonq_f32:
  case SVE::BI__builtin_sve_svget_neonq_f64:
  case SVE::BI__builtin_sve_svget_neonq_bf16: {
    return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
  }

  case SVE::BI__builtin_sve_svdup_neonq_s8:
  case SVE::BI__builtin_sve_svdup_neonq_s16:
  case SVE::BI__builtin_sve_svdup_neonq_s32:
  case SVE::BI__builtin_sve_svdup_neonq_s64:
  case SVE::BI__builtin_sve_svdup_neonq_u8:
  case SVE::BI__builtin_sve_svdup_neonq_u16:
  case SVE::BI__builtin_sve_svdup_neonq_u32:
  case SVE::BI__builtin_sve_svdup_neonq_u64:
  case SVE::BI__builtin_sve_svdup_neonq_f16:
  case SVE::BI__builtin_sve_svdup_neonq_f32:
  case SVE::BI__builtin_sve_svdup_neonq_f64:
  case SVE::BI__builtin_sve_svdup_neonq_bf16: {
    Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
                                               Builder.getInt64(0));
    return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
                                   {Insert, Builder.getInt64(0)});
  }
  }

  /// Should not happen
  return nullptr;
}
Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
                                                  const CallExpr *E) {
  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  llvm::Type *Ty = ConvertType(E->getType());
  llvm::SmallVector<Value *, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    if ((ICEArguments & (1 << i)) == 0)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    else {
      // If this is required to be a constant, constant fold it so that we know
      // that the generated intrinsic gets a ConstantInt.
      std::optional<llvm::APSInt> Result =
          E->getArg(i)->getIntegerConstantExpr(getContext());
      assert(Result && "Expected argument to be a constant");

      // Immediates for SVE llvm intrinsics are always 32bit. We can safely
      // truncate because the immediate has been range checked and no valid
      // immediate requires more than a handful of bits.
      *Result = Result->extOrTrunc(32);
      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
    }
  }

  auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID,
                                              AArch64SMEIntrinsicsProvenSorted);
  SVETypeFlags TypeFlags(Builtin->TypeModifier);
  if (TypeFlags.isLoad() || TypeFlags.isStore())
    return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
    return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
           BuiltinID == SME::BI__builtin_sme_svzero_za)
    return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
           BuiltinID == SME::BI__builtin_sme_svstr_vnum_za ||
           BuiltinID == SME::BI__builtin_sme_svldr_za ||
           BuiltinID == SME::BI__builtin_sme_svstr_za)
    return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (Builtin->LLVMIntrinsic != 0) {
    // Predicates must match the main datatype.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
        if (PredTy->getElementType()->isIntegerTy(1))
          Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));

    Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
                                   getSVEOverloadTypes(TypeFlags, Ty, Ops));
    Value *Call = Builder.CreateCall(F, Ops);
    return Call;
  }

  /// Should not happen
  return nullptr;
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               llvm::Triple::ArchType Arch) {
  if (BuiltinID >= clang::AArch64::FirstSVEBuiltin &&
      BuiltinID <= clang::AArch64::LastSVEBuiltin)
    return EmitAArch64SVEBuiltinExpr(BuiltinID, E);

  if (BuiltinID >= clang::AArch64::FirstSMEBuiltin &&
      BuiltinID <= clang::AArch64::LastSMEBuiltin)
    return EmitAArch64SMEBuiltinExpr(BuiltinID, E);

  unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case clang::AArch64::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case clang::AArch64::BI__builtin_arm_yield:
  case clang::AArch64::BI__yield:
    HintID = 1;
    break;
  case clang::AArch64::BI__builtin_arm_wfe:
  case clang::AArch64::BI__wfe:
    HintID = 2;
    break;
  case clang::AArch64::BI__builtin_arm_wfi:
  case clang::AArch64::BI__wfi:
    HintID = 3;
    break;
  case clang::AArch64::BI__builtin_arm_sev:
  case clang::AArch64::BI__sev:
    HintID = 4;
    break;
  case clang::AArch64::BI__builtin_arm_sevl:
  case clang::AArch64::BI__sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
    return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
  }
== clang::AArch64::BI__builtin_arm_rbit
) {
10418 assert((getContext().getTypeSize(E
->getType()) == 32) &&
10419 "rbit of unusual size!");
10420 llvm::Value
*Arg
= EmitScalarExpr(E
->getArg(0));
10421 return Builder
.CreateCall(
10422 CGM
.getIntrinsic(Intrinsic::bitreverse
, Arg
->getType()), Arg
, "rbit");
10424 if (BuiltinID
== clang::AArch64::BI__builtin_arm_rbit64
) {
10425 assert((getContext().getTypeSize(E
->getType()) == 64) &&
10426 "rbit of unusual size!");
10427 llvm::Value
*Arg
= EmitScalarExpr(E
->getArg(0));
10428 return Builder
.CreateCall(
10429 CGM
.getIntrinsic(Intrinsic::bitreverse
, Arg
->getType()), Arg
, "rbit");
10432 if (BuiltinID
== clang::AArch64::BI__builtin_arm_clz
||
10433 BuiltinID
== clang::AArch64::BI__builtin_arm_clz64
) {
10434 llvm::Value
*Arg
= EmitScalarExpr(E
->getArg(0));
10435 Function
*F
= CGM
.getIntrinsic(Intrinsic::ctlz
, Arg
->getType());
10436 Value
*Res
= Builder
.CreateCall(F
, {Arg
, Builder
.getInt1(false)});
10437 if (BuiltinID
== clang::AArch64::BI__builtin_arm_clz64
)
10438 Res
= Builder
.CreateTrunc(Res
, Builder
.getInt32Ty());
  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
                              "cls");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
                              "cls");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
                              Arg, "frint32z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
                              Arg, "frint64z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
                              Arg, "frint32x");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
                              Arg, "frint64x");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "__jcvt of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
    llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
    llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));

    if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
      // Load from the address via an LLVM intrinsic, receiving a
      // tuple of 8 i64 words, and store each one to ValPtr.
      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
      llvm::Value *Val = Builder.CreateCall(F, MemAddr);
      llvm::Value *ToRet;
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
      }
      return ToRet;
    } else {
      // Load 8 i64 words from ValPtr, and store them to the address
      // via an LLVM intrinsic.
      SmallVector<llvm::Value *, 9> Args;
      Args.push_back(MemAddr);
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        Args.push_back(Builder.CreateLoad(Addr));
      }

      auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
                       ? Intrinsic::aarch64_st64b
                   : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
                       ? Intrinsic::aarch64_st64bv
                       : Intrinsic::aarch64_st64bv0);
      Function *F = CGM.getIntrinsic(Intr);
      return Builder.CreateCall(F, Args);
    }
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {

    auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
                     ? Intrinsic::aarch64_rndr
                     : Intrinsic::aarch64_rndrrs);
    Function *F = CGM.getIntrinsic(Intr);
    llvm::Value *Val = Builder.CreateCall(F);
    Value *RandomValue = Builder.CreateExtractValue(Val, 0);
    Value *Status = Builder.CreateExtractValue(Val, 1);

    Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
    Builder.CreateStore(RandomValue, MemAddress);
    Status = Builder.CreateZExt(Status, Int32Ty);
    return Status;
  }

  if (BuiltinID == clang::AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
  if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
       BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
      getContext().getTypeSize(E->getType()) == 128) {
    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                             ? Intrinsic::aarch64_ldaxp
                             : Intrinsic::aarch64_ldxp);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
    Val0 = Builder.CreateZExt(Val0, Int128Ty);
    Val1 = Builder.CreateZExt(Val1, Int128Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
             BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                             ? Intrinsic::aarch64_ldaxr
                             : Intrinsic::aarch64_ldxr,
                         UnqualPtrTy);
    CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
    Val->addParamAttr(
        0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    llvm::Type *IntResTy = llvm::IntegerType::get(
        getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
    return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                                 RealResTy);
  }

  if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
       BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
      getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                             ? Intrinsic::aarch64_stlxp
                             : Intrinsic::aarch64_stxp);
    llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);

    Tmp = Tmp.withElementType(STy);
    llvm::Value *Val = Builder.CreateLoad(Tmp);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
      BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
    }

    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                             ? Intrinsic::aarch64_stlxr
                             : Intrinsic::aarch64_stxr,
                         StoreAddr->getType());
    CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
    CI->addParamAttr(
        1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
    return CI;
  }
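  // Rough example of the exclusive-access lowering above: __builtin_arm_ldrex
  // on an 'int *' becomes an @llvm.aarch64.ldxr call (with an elementtype(i32)
  // attribute on the pointer argument) whose i64 result is truncated back to
  // i32, and the matching __builtin_arm_strex becomes @llvm.aarch64.stxr with
  // the stored value widened to i64 (illustrative only).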
  if (BuiltinID == clang::AArch64::BI__getReg) {
    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    LLVMContext &Context = CGM.getLLVMContext();
    std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);

    llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
    llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
    llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
    return Builder.CreateCall(F, Metadata);
  }

  if (BuiltinID == clang::AArch64::BI__break) {
    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
    return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                               llvm::SyncScope::SingleThread);
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::AArch64::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
  case clang::AArch64::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
  case clang::AArch64::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
  case clang::AArch64::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
  case clang::AArch64::BI__builtin_arm_crc32w:
    CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
  case clang::AArch64::BI__builtin_arm_crc32cw:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
  case clang::AArch64::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
  case clang::AArch64::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(CRCIntrinsicID);

    llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
    Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);

    return Builder.CreateCall(F, {Arg0, Arg1});
  }

  // Memory Operations (MOPS)
  if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));
    Dst = Builder.CreatePointerCast(Dst, Int8PtrTy);
    Val = Builder.CreateTrunc(Val, Int8Ty);
    Size = Builder.CreateIntCast(Size, Int64Ty, false);
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
  }
  // Memory Tagging Extensions (MTE) Intrinsics
  Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::AArch64::BI__builtin_arm_irg:
    MTEIntrinsicID = Intrinsic::aarch64_irg; break;
  case clang::AArch64::BI__builtin_arm_addg:
    MTEIntrinsicID = Intrinsic::aarch64_addg; break;
  case clang::AArch64::BI__builtin_arm_gmi:
    MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
  case clang::AArch64::BI__builtin_arm_ldg:
    MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
  case clang::AArch64::BI__builtin_arm_stg:
    MTEIntrinsicID = Intrinsic::aarch64_stg; break;
  case clang::AArch64::BI__builtin_arm_subp:
    MTEIntrinsicID = Intrinsic::aarch64_subp; break;
  }

  if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
    llvm::Type *T = ConvertType(E->getType());

    if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *Mask = EmitScalarExpr(E->getArg(1));

      Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
      Mask = Builder.CreateZExt(Mask, Int64Ty);
      Value *RV = Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
      return Builder.CreatePointerCast(RV, T);
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *TagOffset = EmitScalarExpr(E->getArg(1));

      Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
      TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
      Value *RV = Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
      return Builder.CreatePointerCast(RV, T);
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *ExcludedMask = EmitScalarExpr(E->getArg(1));

      ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
      Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
      return Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
    }
    // Although it is possible to supply a different return
    // address (first arg) to this intrinsic, for now we set
    // return address same as input address.
    if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
      Value *TagAddress = EmitScalarExpr(E->getArg(0));
      TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
      Value *RV = Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
      return Builder.CreatePointerCast(RV, T);
    }
    // Although it is possible to supply a different tag (to set)
    // to this intrinsic (as first arg), for now we supply
    // the tag that is in input address arg (common use case).
    if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
      Value *TagAddress = EmitScalarExpr(E->getArg(0));
      TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
      return Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
      Value *PointerA = EmitScalarExpr(E->getArg(0));
      Value *PointerB = EmitScalarExpr(E->getArg(1));
      PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
      PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
      return Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
    }
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {

    SpecialRegisterAccessKind AccessKind = Write;
    if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
      AccessKind = VolatileRead;

    bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
                            BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;

    bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
                   BuiltinID == clang::AArch64::BI__builtin_arm_wsr;

    bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
                    BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;

    llvm::Type *ValueType;
    llvm::Type *RegisterType = Int64Ty;
    if (Is32Bit) {
      ValueType = Int32Ty;
    } else if (Is128Bit) {
      llvm::Type *Int128Ty =
          llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
      ValueType = Int128Ty;
      RegisterType = Int128Ty;
    } else if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
    } else {
      ValueType = Int64Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      AccessKind);
  }
  if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
      BuiltinID == clang::AArch64::BI_WriteStatusReg) {
    LLVMContext &Context = CGM.getLLVMContext();

    unsigned SysReg =
        E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();

    std::string SysRegStr;
    llvm::raw_string_ostream(SysRegStr) <<
                       ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
                       ((SysReg >> 11) & 7)              << ":" <<
                       ((SysReg >> 7)  & 15)             << ":" <<
                       ((SysReg >> 3)  & 15)             << ":" <<
                       ( SysReg        & 7);

    llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
    llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
    llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

    llvm::Type *RegisterType = Int64Ty;
    llvm::Type *Types[] = { RegisterType };

    if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);

      return Builder.CreateCall(F, Metadata);
    }

    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
    llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));

    return Builder.CreateCall(F, { Metadata, ArgValue });
  }
10901 if (BuiltinID
== clang::AArch64::BI_AddressOfReturnAddress
) {
10902 llvm::Function
*F
=
10903 CGM
.getIntrinsic(Intrinsic::addressofreturnaddress
, AllocaInt8PtrTy
);
10904 return Builder
.CreateCall(F
);
10907 if (BuiltinID
== clang::AArch64::BI__builtin_sponentry
) {
10908 llvm::Function
*F
= CGM
.getIntrinsic(Intrinsic::sponentry
, AllocaInt8PtrTy
);
10909 return Builder
.CreateCall(F
);
10912 if (BuiltinID
== clang::AArch64::BI__mulh
||
10913 BuiltinID
== clang::AArch64::BI__umulh
) {
10914 llvm::Type
*ResType
= ConvertType(E
->getType());
10915 llvm::Type
*Int128Ty
= llvm::IntegerType::get(getLLVMContext(), 128);
10917 bool IsSigned
= BuiltinID
== clang::AArch64::BI__mulh
;
10919 Builder
.CreateIntCast(EmitScalarExpr(E
->getArg(0)), Int128Ty
, IsSigned
);
10921 Builder
.CreateIntCast(EmitScalarExpr(E
->getArg(1)), Int128Ty
, IsSigned
);
10923 Value
*MulResult
, *HigherBits
;
10925 MulResult
= Builder
.CreateNSWMul(LHS
, RHS
);
10926 HigherBits
= Builder
.CreateAShr(MulResult
, 64);
10928 MulResult
= Builder
.CreateNUWMul(LHS
, RHS
);
10929 HigherBits
= Builder
.CreateLShr(MulResult
, 64);
10931 HigherBits
= Builder
.CreateIntCast(HigherBits
, ResType
, IsSigned
);
10936 if (BuiltinID
== AArch64::BI__writex18byte
||
10937 BuiltinID
== AArch64::BI__writex18word
||
10938 BuiltinID
== AArch64::BI__writex18dword
||
10939 BuiltinID
== AArch64::BI__writex18qword
) {
10941 LLVMContext
&Context
= CGM
.getLLVMContext();
10942 llvm::Metadata
*Ops
[] = {llvm::MDString::get(Context
, "x18")};
10943 llvm::MDNode
*RegName
= llvm::MDNode::get(Context
, Ops
);
10944 llvm::Value
*Metadata
= llvm::MetadataAsValue::get(Context
, RegName
);
10945 llvm::Function
*F
=
10946 CGM
.getIntrinsic(llvm::Intrinsic::read_register
, {Int64Ty
});
10947 llvm::Value
*X18
= Builder
.CreateCall(F
, Metadata
);
10948 X18
= Builder
.CreateIntToPtr(X18
, Int8PtrTy
);
10950 // Store val at x18 + offset
10951 Value
*Offset
= Builder
.CreateZExt(EmitScalarExpr(E
->getArg(0)), Int64Ty
);
10952 Value
*Ptr
= Builder
.CreateGEP(Int8Ty
, X18
, Offset
);
10953 Value
*Val
= EmitScalarExpr(E
->getArg(1));
10954 StoreInst
*Store
= Builder
.CreateAlignedStore(Val
, Ptr
, CharUnits::One());
10958 if (BuiltinID
== AArch64::BI__readx18byte
||
10959 BuiltinID
== AArch64::BI__readx18word
||
10960 BuiltinID
== AArch64::BI__readx18dword
||
10961 BuiltinID
== AArch64::BI__readx18qword
) {
10962 llvm::Type
*IntTy
= ConvertType(E
->getType());
10965 LLVMContext
&Context
= CGM
.getLLVMContext();
10966 llvm::Metadata
*Ops
[] = {llvm::MDString::get(Context
, "x18")};
10967 llvm::MDNode
*RegName
= llvm::MDNode::get(Context
, Ops
);
10968 llvm::Value
*Metadata
= llvm::MetadataAsValue::get(Context
, RegName
);
10969 llvm::Function
*F
=
10970 CGM
.getIntrinsic(llvm::Intrinsic::read_register
, {Int64Ty
});
10971 llvm::Value
*X18
= Builder
.CreateCall(F
, Metadata
);
10972 X18
= Builder
.CreateIntToPtr(X18
, Int8PtrTy
);
10974 // Load x18 + offset
10975 Value
*Offset
= Builder
.CreateZExt(EmitScalarExpr(E
->getArg(0)), Int64Ty
);
10976 Value
*Ptr
= Builder
.CreateGEP(Int8Ty
, X18
, Offset
);
10977 LoadInst
*Load
= Builder
.CreateAlignedLoad(IntTy
, Ptr
, CharUnits::One());
10981 if (BuiltinID
== AArch64::BI_CopyDoubleFromInt64
||
10982 BuiltinID
== AArch64::BI_CopyFloatFromInt32
||
10983 BuiltinID
== AArch64::BI_CopyInt32FromFloat
||
10984 BuiltinID
== AArch64::BI_CopyInt64FromDouble
) {
10985 Value
*Arg
= EmitScalarExpr(E
->getArg(0));
10986 llvm::Type
*RetTy
= ConvertType(E
->getType());
10987 return Builder
.CreateBitCast(Arg
, RetTy
);
10990 if (BuiltinID
== AArch64::BI_CountLeadingOnes
||
10991 BuiltinID
== AArch64::BI_CountLeadingOnes64
||
10992 BuiltinID
== AArch64::BI_CountLeadingZeros
||
10993 BuiltinID
== AArch64::BI_CountLeadingZeros64
) {
10994 Value
*Arg
= EmitScalarExpr(E
->getArg(0));
10995 llvm::Type
*ArgType
= Arg
->getType();
10997 if (BuiltinID
== AArch64::BI_CountLeadingOnes
||
10998 BuiltinID
== AArch64::BI_CountLeadingOnes64
)
10999 Arg
= Builder
.CreateXor(Arg
, Constant::getAllOnesValue(ArgType
));
11001 Function
*F
= CGM
.getIntrinsic(Intrinsic::ctlz
, ArgType
);
11002 Value
*Result
= Builder
.CreateCall(F
, {Arg
, Builder
.getInt1(false)});
11004 if (BuiltinID
== AArch64::BI_CountLeadingOnes64
||
11005 BuiltinID
== AArch64::BI_CountLeadingZeros64
)
11006 Result
= Builder
.CreateTrunc(Result
, Builder
.getInt32Ty());
11010 if (BuiltinID
== AArch64::BI_CountLeadingSigns
||
11011 BuiltinID
== AArch64::BI_CountLeadingSigns64
) {
11012 Value
*Arg
= EmitScalarExpr(E
->getArg(0));
11014 Function
*F
= (BuiltinID
== AArch64::BI_CountLeadingSigns
)
11015 ? CGM
.getIntrinsic(Intrinsic::aarch64_cls
)
11016 : CGM
.getIntrinsic(Intrinsic::aarch64_cls64
);
11018 Value
*Result
= Builder
.CreateCall(F
, Arg
, "cls");
11019 if (BuiltinID
== AArch64::BI_CountLeadingSigns64
)
11020 Result
= Builder
.CreateTrunc(Result
, Builder
.getInt32Ty());
11024 if (BuiltinID
== AArch64::BI_CountOneBits
||
11025 BuiltinID
== AArch64::BI_CountOneBits64
) {
11026 Value
*ArgValue
= EmitScalarExpr(E
->getArg(0));
11027 llvm::Type
*ArgType
= ArgValue
->getType();
11028 Function
*F
= CGM
.getIntrinsic(Intrinsic::ctpop
, ArgType
);
11030 Value
*Result
= Builder
.CreateCall(F
, ArgValue
);
11031 if (BuiltinID
== AArch64::BI_CountOneBits64
)
11032 Result
= Builder
.CreateTrunc(Result
, Builder
.getInt32Ty());
11036 if (BuiltinID
== AArch64::BI__prefetch
) {
11037 Value
*Address
= EmitScalarExpr(E
->getArg(0));
11038 Value
*RW
= llvm::ConstantInt::get(Int32Ty
, 0);
11039 Value
*Locality
= ConstantInt::get(Int32Ty
, 3);
11040 Value
*Data
= llvm::ConstantInt::get(Int32Ty
, 1);
11041 Function
*F
= CGM
.getIntrinsic(Intrinsic::prefetch
, Address
->getType());
11042 return Builder
.CreateCall(F
, {Address
, RW
, Locality
, Data
});
11045 // Handle MSVC intrinsics before argument evaluation to prevent double
11047 if (std::optional
<MSVCIntrin
> MsvcIntId
=
11048 translateAarch64ToMsvcIntrin(BuiltinID
))
11049 return EmitMSVCBuiltinExpr(*MsvcIntId
, E
);
11051 // Some intrinsics are equivalent - if they are use the base intrinsic ID.
11052 auto It
= llvm::find_if(NEONEquivalentIntrinsicMap
, [BuiltinID
](auto &P
) {
11053 return P
.first
== BuiltinID
;
11055 if (It
!= end(NEONEquivalentIntrinsicMap
))
11056 BuiltinID
= It
->second
;
11058 // Find out if any arguments are required to be integer constant
11060 unsigned ICEArguments
= 0;
11061 ASTContext::GetBuiltinTypeError Error
;
11062 getContext().GetBuiltinType(BuiltinID
, Error
, &ICEArguments
);
11063 assert(Error
== ASTContext::GE_None
&& "Should not codegen an error");
11065 llvm::SmallVector
<Value
*, 4> Ops
;
11066 Address PtrOp0
= Address::invalid();
11067 for (unsigned i
= 0, e
= E
->getNumArgs() - 1; i
!= e
; i
++) {
11069 switch (BuiltinID
) {
11070 case NEON::BI__builtin_neon_vld1_v
:
11071 case NEON::BI__builtin_neon_vld1q_v
:
11072 case NEON::BI__builtin_neon_vld1_dup_v
:
11073 case NEON::BI__builtin_neon_vld1q_dup_v
:
11074 case NEON::BI__builtin_neon_vld1_lane_v
:
11075 case NEON::BI__builtin_neon_vld1q_lane_v
:
11076 case NEON::BI__builtin_neon_vst1_v
:
11077 case NEON::BI__builtin_neon_vst1q_v
:
11078 case NEON::BI__builtin_neon_vst1_lane_v
:
11079 case NEON::BI__builtin_neon_vst1q_lane_v
:
11080 case NEON::BI__builtin_neon_vldap1_lane_s64
:
11081 case NEON::BI__builtin_neon_vldap1q_lane_s64
:
11082 case NEON::BI__builtin_neon_vstl1_lane_s64
:
11083 case NEON::BI__builtin_neon_vstl1q_lane_s64
:
11084 // Get the alignment for the argument in addition to the value;
11085 // we'll use it later.
11086 PtrOp0
= EmitPointerWithAlignment(E
->getArg(0));
11087 Ops
.push_back(PtrOp0
.getPointer());
11091 if ((ICEArguments
& (1 << i
)) == 0) {
11092 Ops
.push_back(EmitScalarExpr(E
->getArg(i
)));
11094 // If this is required to be a constant, constant fold it so that we know
11095 // that the generated intrinsic gets a ConstantInt.
11096 Ops
.push_back(llvm::ConstantInt::get(
11098 *E
->getArg(i
)->getIntegerConstantExpr(getContext())));
11102 auto SISDMap
= ArrayRef(AArch64SISDIntrinsicMap
);
11103 const ARMVectorIntrinsicInfo
*Builtin
= findARMVectorIntrinsicInMap(
11104 SISDMap
, BuiltinID
, AArch64SISDIntrinsicsProvenSorted
);
11107 Ops
.push_back(EmitScalarExpr(E
->getArg(E
->getNumArgs() - 1)));
11108 Value
*Result
= EmitCommonNeonSISDBuiltinExpr(*this, *Builtin
, Ops
, E
);
11109 assert(Result
&& "SISD intrinsic should have been handled");
11113 const Expr
*Arg
= E
->getArg(E
->getNumArgs()-1);
11114 NeonTypeFlags
Type(0);
11115 if (std::optional
<llvm::APSInt
> Result
=
11116 Arg
->getIntegerConstantExpr(getContext()))
11117 // Determine the type of this overloaded NEON intrinsic.
11118 Type
= NeonTypeFlags(Result
->getZExtValue());
11120 bool usgn
= Type
.isUnsigned();
11121 bool quad
= Type
.isQuad();
11123 // Handle non-overloaded intrinsics first.
11124 switch (BuiltinID
) {
11126 case NEON::BI__builtin_neon_vabsh_f16
:
11127 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11128 return EmitNeonCall(CGM
.getIntrinsic(Intrinsic::fabs
, HalfTy
), Ops
, "vabs");
11129 case NEON::BI__builtin_neon_vaddq_p128
: {
11130 llvm::Type
*Ty
= GetNeonType(this, NeonTypeFlags::Poly128
);
11131 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11132 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Ty
);
11133 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Ty
);
11134 Ops
[0] = Builder
.CreateXor(Ops
[0], Ops
[1]);
11135 llvm::Type
*Int128Ty
= llvm::Type::getIntNTy(getLLVMContext(), 128);
11136 return Builder
.CreateBitCast(Ops
[0], Int128Ty
);
11138 case NEON::BI__builtin_neon_vldrq_p128
: {
11139 llvm::Type
*Int128Ty
= llvm::Type::getIntNTy(getLLVMContext(), 128);
11140 Value
*Ptr
= EmitScalarExpr(E
->getArg(0));
11141 return Builder
.CreateAlignedLoad(Int128Ty
, Ptr
,
11142 CharUnits::fromQuantity(16));
11144 case NEON::BI__builtin_neon_vstrq_p128
: {
11145 Value
*Ptr
= Ops
[0];
11146 return Builder
.CreateDefaultAlignedStore(EmitScalarExpr(E
->getArg(1)), Ptr
);
11148 case NEON::BI__builtin_neon_vcvts_f32_u32
:
11149 case NEON::BI__builtin_neon_vcvtd_f64_u64
:
11152 case NEON::BI__builtin_neon_vcvts_f32_s32
:
11153 case NEON::BI__builtin_neon_vcvtd_f64_s64
: {
11154 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11155 bool Is64
= Ops
[0]->getType()->getPrimitiveSizeInBits() == 64;
11156 llvm::Type
*InTy
= Is64
? Int64Ty
: Int32Ty
;
11157 llvm::Type
*FTy
= Is64
? DoubleTy
: FloatTy
;
11158 Ops
[0] = Builder
.CreateBitCast(Ops
[0], InTy
);
11160 return Builder
.CreateUIToFP(Ops
[0], FTy
);
11161 return Builder
.CreateSIToFP(Ops
[0], FTy
);
11163 case NEON::BI__builtin_neon_vcvth_f16_u16
:
11164 case NEON::BI__builtin_neon_vcvth_f16_u32
:
11165 case NEON::BI__builtin_neon_vcvth_f16_u64
:
11168 case NEON::BI__builtin_neon_vcvth_f16_s16
:
11169 case NEON::BI__builtin_neon_vcvth_f16_s32
:
11170 case NEON::BI__builtin_neon_vcvth_f16_s64
: {
11171 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11172 llvm::Type
*FTy
= HalfTy
;
11174 if (Ops
[0]->getType()->getPrimitiveSizeInBits() == 64)
11176 else if (Ops
[0]->getType()->getPrimitiveSizeInBits() == 32)
11180 Ops
[0] = Builder
.CreateBitCast(Ops
[0], InTy
);
11182 return Builder
.CreateUIToFP(Ops
[0], FTy
);
11183 return Builder
.CreateSIToFP(Ops
[0], FTy
);
11185 case NEON::BI__builtin_neon_vcvtah_u16_f16
:
11186 case NEON::BI__builtin_neon_vcvtmh_u16_f16
:
11187 case NEON::BI__builtin_neon_vcvtnh_u16_f16
:
11188 case NEON::BI__builtin_neon_vcvtph_u16_f16
:
11189 case NEON::BI__builtin_neon_vcvth_u16_f16
:
11190 case NEON::BI__builtin_neon_vcvtah_s16_f16
:
11191 case NEON::BI__builtin_neon_vcvtmh_s16_f16
:
11192 case NEON::BI__builtin_neon_vcvtnh_s16_f16
:
11193 case NEON::BI__builtin_neon_vcvtph_s16_f16
:
11194 case NEON::BI__builtin_neon_vcvth_s16_f16
: {
11196 llvm::Type
* InTy
= Int32Ty
;
11197 llvm::Type
* FTy
= HalfTy
;
11198 llvm::Type
*Tys
[2] = {InTy
, FTy
};
11199 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11200 switch (BuiltinID
) {
11201 default: llvm_unreachable("missing builtin ID in switch!");
11202 case NEON::BI__builtin_neon_vcvtah_u16_f16
:
11203 Int
= Intrinsic::aarch64_neon_fcvtau
; break;
11204 case NEON::BI__builtin_neon_vcvtmh_u16_f16
:
11205 Int
= Intrinsic::aarch64_neon_fcvtmu
; break;
11206 case NEON::BI__builtin_neon_vcvtnh_u16_f16
:
11207 Int
= Intrinsic::aarch64_neon_fcvtnu
; break;
11208 case NEON::BI__builtin_neon_vcvtph_u16_f16
:
11209 Int
= Intrinsic::aarch64_neon_fcvtpu
; break;
11210 case NEON::BI__builtin_neon_vcvth_u16_f16
:
11211 Int
= Intrinsic::aarch64_neon_fcvtzu
; break;
11212 case NEON::BI__builtin_neon_vcvtah_s16_f16
:
11213 Int
= Intrinsic::aarch64_neon_fcvtas
; break;
11214 case NEON::BI__builtin_neon_vcvtmh_s16_f16
:
11215 Int
= Intrinsic::aarch64_neon_fcvtms
; break;
11216 case NEON::BI__builtin_neon_vcvtnh_s16_f16
:
11217 Int
= Intrinsic::aarch64_neon_fcvtns
; break;
11218 case NEON::BI__builtin_neon_vcvtph_s16_f16
:
11219 Int
= Intrinsic::aarch64_neon_fcvtps
; break;
11220 case NEON::BI__builtin_neon_vcvth_s16_f16
:
11221 Int
= Intrinsic::aarch64_neon_fcvtzs
; break;
11223 Ops
[0] = EmitNeonCall(CGM
.getIntrinsic(Int
, Tys
), Ops
, "fcvt");
11224 return Builder
.CreateTrunc(Ops
[0], Int16Ty
);
11226 case NEON::BI__builtin_neon_vcaleh_f16
:
11227 case NEON::BI__builtin_neon_vcalth_f16
:
11228 case NEON::BI__builtin_neon_vcageh_f16
:
11229 case NEON::BI__builtin_neon_vcagth_f16
: {
11231 llvm::Type
* InTy
= Int32Ty
;
11232 llvm::Type
* FTy
= HalfTy
;
11233 llvm::Type
*Tys
[2] = {InTy
, FTy
};
11234 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11235 switch (BuiltinID
) {
11236 default: llvm_unreachable("missing builtin ID in switch!");
11237 case NEON::BI__builtin_neon_vcageh_f16
:
11238 Int
= Intrinsic::aarch64_neon_facge
; break;
11239 case NEON::BI__builtin_neon_vcagth_f16
:
11240 Int
= Intrinsic::aarch64_neon_facgt
; break;
11241 case NEON::BI__builtin_neon_vcaleh_f16
:
11242 Int
= Intrinsic::aarch64_neon_facge
; std::swap(Ops
[0], Ops
[1]); break;
11243 case NEON::BI__builtin_neon_vcalth_f16
:
11244 Int
= Intrinsic::aarch64_neon_facgt
; std::swap(Ops
[0], Ops
[1]); break;
11246 Ops
[0] = EmitNeonCall(CGM
.getIntrinsic(Int
, Tys
), Ops
, "facg");
11247 return Builder
.CreateTrunc(Ops
[0], Int16Ty
);
11249 case NEON::BI__builtin_neon_vcvth_n_s16_f16
:
11250 case NEON::BI__builtin_neon_vcvth_n_u16_f16
: {
11252 llvm::Type
* InTy
= Int32Ty
;
11253 llvm::Type
* FTy
= HalfTy
;
11254 llvm::Type
*Tys
[2] = {InTy
, FTy
};
11255 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11256 switch (BuiltinID
) {
11257 default: llvm_unreachable("missing builtin ID in switch!");
11258 case NEON::BI__builtin_neon_vcvth_n_s16_f16
:
11259 Int
= Intrinsic::aarch64_neon_vcvtfp2fxs
; break;
11260 case NEON::BI__builtin_neon_vcvth_n_u16_f16
:
11261 Int
= Intrinsic::aarch64_neon_vcvtfp2fxu
; break;
11263 Ops
[0] = EmitNeonCall(CGM
.getIntrinsic(Int
, Tys
), Ops
, "fcvth_n");
11264 return Builder
.CreateTrunc(Ops
[0], Int16Ty
);
11266 case NEON::BI__builtin_neon_vcvth_n_f16_s16
:
11267 case NEON::BI__builtin_neon_vcvth_n_f16_u16
: {
11269 llvm::Type
* FTy
= HalfTy
;
11270 llvm::Type
* InTy
= Int32Ty
;
11271 llvm::Type
*Tys
[2] = {FTy
, InTy
};
11272 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11273 switch (BuiltinID
) {
11274 default: llvm_unreachable("missing builtin ID in switch!");
11275 case NEON::BI__builtin_neon_vcvth_n_f16_s16
:
11276 Int
= Intrinsic::aarch64_neon_vcvtfxs2fp
;
11277 Ops
[0] = Builder
.CreateSExt(Ops
[0], InTy
, "sext");
11279 case NEON::BI__builtin_neon_vcvth_n_f16_u16
:
11280 Int
= Intrinsic::aarch64_neon_vcvtfxu2fp
;
11281 Ops
[0] = Builder
.CreateZExt(Ops
[0], InTy
);
11284 return EmitNeonCall(CGM
.getIntrinsic(Int
, Tys
), Ops
, "fcvth_n");
11286 case NEON::BI__builtin_neon_vpaddd_s64
: {
11287 auto *Ty
= llvm::FixedVectorType::get(Int64Ty
, 2);
11288 Value
*Vec
= EmitScalarExpr(E
->getArg(0));
11289 // The vector is v2f64, so make sure it's bitcast to that.
11290 Vec
= Builder
.CreateBitCast(Vec
, Ty
, "v2i64");
11291 llvm::Value
*Idx0
= llvm::ConstantInt::get(SizeTy
, 0);
11292 llvm::Value
*Idx1
= llvm::ConstantInt::get(SizeTy
, 1);
11293 Value
*Op0
= Builder
.CreateExtractElement(Vec
, Idx0
, "lane0");
11294 Value
*Op1
= Builder
.CreateExtractElement(Vec
, Idx1
, "lane1");
11295 // Pairwise addition of a v2f64 into a scalar f64.
11296 return Builder
.CreateAdd(Op0
, Op1
, "vpaddd");
11298 case NEON::BI__builtin_neon_vpaddd_f64
: {
11299 auto *Ty
= llvm::FixedVectorType::get(DoubleTy
, 2);
11300 Value
*Vec
= EmitScalarExpr(E
->getArg(0));
11301 // The vector is v2f64, so make sure it's bitcast to that.
11302 Vec
= Builder
.CreateBitCast(Vec
, Ty
, "v2f64");
11303 llvm::Value
*Idx0
= llvm::ConstantInt::get(SizeTy
, 0);
11304 llvm::Value
*Idx1
= llvm::ConstantInt::get(SizeTy
, 1);
11305 Value
*Op0
= Builder
.CreateExtractElement(Vec
, Idx0
, "lane0");
11306 Value
*Op1
= Builder
.CreateExtractElement(Vec
, Idx1
, "lane1");
11307 // Pairwise addition of a v2f64 into a scalar f64.
11308 return Builder
.CreateFAdd(Op0
, Op1
, "vpaddd");
11310 case NEON::BI__builtin_neon_vpadds_f32
: {
11311 auto *Ty
= llvm::FixedVectorType::get(FloatTy
, 2);
11312 Value
*Vec
= EmitScalarExpr(E
->getArg(0));
11313 // The vector is v2f32, so make sure it's bitcast to that.
11314 Vec
= Builder
.CreateBitCast(Vec
, Ty
, "v2f32");
11315 llvm::Value
*Idx0
= llvm::ConstantInt::get(SizeTy
, 0);
11316 llvm::Value
*Idx1
= llvm::ConstantInt::get(SizeTy
, 1);
11317 Value
*Op0
= Builder
.CreateExtractElement(Vec
, Idx0
, "lane0");
11318 Value
*Op1
= Builder
.CreateExtractElement(Vec
, Idx1
, "lane1");
11319 // Pairwise addition of a v2f32 into a scalar f32.
11320 return Builder
.CreateFAdd(Op0
, Op1
, "vpaddd");
11322 case NEON::BI__builtin_neon_vceqzd_s64
:
11323 case NEON::BI__builtin_neon_vceqzd_f64
:
11324 case NEON::BI__builtin_neon_vceqzs_f32
:
11325 case NEON::BI__builtin_neon_vceqzh_f16
:
11326 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11327 return EmitAArch64CompareBuiltinExpr(
11328 Ops
[0], ConvertType(E
->getCallReturnType(getContext())),
11329 ICmpInst::FCMP_OEQ
, ICmpInst::ICMP_EQ
, "vceqz");
11330 case NEON::BI__builtin_neon_vcgezd_s64
:
11331 case NEON::BI__builtin_neon_vcgezd_f64
:
11332 case NEON::BI__builtin_neon_vcgezs_f32
:
11333 case NEON::BI__builtin_neon_vcgezh_f16
:
11334 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11335 return EmitAArch64CompareBuiltinExpr(
11336 Ops
[0], ConvertType(E
->getCallReturnType(getContext())),
11337 ICmpInst::FCMP_OGE
, ICmpInst::ICMP_SGE
, "vcgez");
11338 case NEON::BI__builtin_neon_vclezd_s64
:
11339 case NEON::BI__builtin_neon_vclezd_f64
:
11340 case NEON::BI__builtin_neon_vclezs_f32
:
11341 case NEON::BI__builtin_neon_vclezh_f16
:
11342 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11343 return EmitAArch64CompareBuiltinExpr(
11344 Ops
[0], ConvertType(E
->getCallReturnType(getContext())),
11345 ICmpInst::FCMP_OLE
, ICmpInst::ICMP_SLE
, "vclez");
11346 case NEON::BI__builtin_neon_vcgtzd_s64
:
11347 case NEON::BI__builtin_neon_vcgtzd_f64
:
11348 case NEON::BI__builtin_neon_vcgtzs_f32
:
11349 case NEON::BI__builtin_neon_vcgtzh_f16
:
11350 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11351 return EmitAArch64CompareBuiltinExpr(
11352 Ops
[0], ConvertType(E
->getCallReturnType(getContext())),
11353 ICmpInst::FCMP_OGT
, ICmpInst::ICMP_SGT
, "vcgtz");
11354 case NEON::BI__builtin_neon_vcltzd_s64
:
11355 case NEON::BI__builtin_neon_vcltzd_f64
:
11356 case NEON::BI__builtin_neon_vcltzs_f32
:
11357 case NEON::BI__builtin_neon_vcltzh_f16
:
11358 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11359 return EmitAArch64CompareBuiltinExpr(
11360 Ops
[0], ConvertType(E
->getCallReturnType(getContext())),
11361 ICmpInst::FCMP_OLT
, ICmpInst::ICMP_SLT
, "vcltz");
11363 case NEON::BI__builtin_neon_vceqzd_u64
: {
11364 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
11365 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Int64Ty
);
11367 Builder
.CreateICmpEQ(Ops
[0], llvm::Constant::getNullValue(Int64Ty
));
11368 return Builder
.CreateSExt(Ops
[0], Int64Ty
, "vceqzd");
11370 case NEON::BI__builtin_neon_vceqd_f64
:
11371 case NEON::BI__builtin_neon_vcled_f64
:
11372 case NEON::BI__builtin_neon_vcltd_f64
:
11373 case NEON::BI__builtin_neon_vcged_f64
:
11374 case NEON::BI__builtin_neon_vcgtd_f64
: {
11375 llvm::CmpInst::Predicate P
;
11376 switch (BuiltinID
) {
11377 default: llvm_unreachable("missing builtin ID in switch!");
11378 case NEON::BI__builtin_neon_vceqd_f64
: P
= llvm::FCmpInst::FCMP_OEQ
; break;
11379 case NEON::BI__builtin_neon_vcled_f64
: P
= llvm::FCmpInst::FCMP_OLE
; break;
11380 case NEON::BI__builtin_neon_vcltd_f64
: P
= llvm::FCmpInst::FCMP_OLT
; break;
11381 case NEON::BI__builtin_neon_vcged_f64
: P
= llvm::FCmpInst::FCMP_OGE
; break;
11382 case NEON::BI__builtin_neon_vcgtd_f64
: P
= llvm::FCmpInst::FCMP_OGT
; break;
11384 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11385 Ops
[0] = Builder
.CreateBitCast(Ops
[0], DoubleTy
);
11386 Ops
[1] = Builder
.CreateBitCast(Ops
[1], DoubleTy
);
11387 if (P
== llvm::FCmpInst::FCMP_OEQ
)
11388 Ops
[0] = Builder
.CreateFCmp(P
, Ops
[0], Ops
[1]);
11390 Ops
[0] = Builder
.CreateFCmpS(P
, Ops
[0], Ops
[1]);
11391 return Builder
.CreateSExt(Ops
[0], Int64Ty
, "vcmpd");
11393 case NEON::BI__builtin_neon_vceqs_f32
:
11394 case NEON::BI__builtin_neon_vcles_f32
:
11395 case NEON::BI__builtin_neon_vclts_f32
:
11396 case NEON::BI__builtin_neon_vcges_f32
:
11397 case NEON::BI__builtin_neon_vcgts_f32
: {
11398 llvm::CmpInst::Predicate P
;
11399 switch (BuiltinID
) {
11400 default: llvm_unreachable("missing builtin ID in switch!");
11401 case NEON::BI__builtin_neon_vceqs_f32
: P
= llvm::FCmpInst::FCMP_OEQ
; break;
11402 case NEON::BI__builtin_neon_vcles_f32
: P
= llvm::FCmpInst::FCMP_OLE
; break;
11403 case NEON::BI__builtin_neon_vclts_f32
: P
= llvm::FCmpInst::FCMP_OLT
; break;
11404 case NEON::BI__builtin_neon_vcges_f32
: P
= llvm::FCmpInst::FCMP_OGE
; break;
11405 case NEON::BI__builtin_neon_vcgts_f32
: P
= llvm::FCmpInst::FCMP_OGT
; break;
11407 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11408 Ops
[0] = Builder
.CreateBitCast(Ops
[0], FloatTy
);
11409 Ops
[1] = Builder
.CreateBitCast(Ops
[1], FloatTy
);
11410 if (P
== llvm::FCmpInst::FCMP_OEQ
)
11411 Ops
[0] = Builder
.CreateFCmp(P
, Ops
[0], Ops
[1]);
11413 Ops
[0] = Builder
.CreateFCmpS(P
, Ops
[0], Ops
[1]);
11414 return Builder
.CreateSExt(Ops
[0], Int32Ty
, "vcmpd");
11416 case NEON::BI__builtin_neon_vceqh_f16
:
11417 case NEON::BI__builtin_neon_vcleh_f16
:
11418 case NEON::BI__builtin_neon_vclth_f16
:
11419 case NEON::BI__builtin_neon_vcgeh_f16
:
11420 case NEON::BI__builtin_neon_vcgth_f16
: {
11421 llvm::CmpInst::Predicate P
;
11422 switch (BuiltinID
) {
11423 default: llvm_unreachable("missing builtin ID in switch!");
11424 case NEON::BI__builtin_neon_vceqh_f16
: P
= llvm::FCmpInst::FCMP_OEQ
; break;
11425 case NEON::BI__builtin_neon_vcleh_f16
: P
= llvm::FCmpInst::FCMP_OLE
; break;
11426 case NEON::BI__builtin_neon_vclth_f16
: P
= llvm::FCmpInst::FCMP_OLT
; break;
11427 case NEON::BI__builtin_neon_vcgeh_f16
: P
= llvm::FCmpInst::FCMP_OGE
; break;
11428 case NEON::BI__builtin_neon_vcgth_f16
: P
= llvm::FCmpInst::FCMP_OGT
; break;
11430 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11431 Ops
[0] = Builder
.CreateBitCast(Ops
[0], HalfTy
);
11432 Ops
[1] = Builder
.CreateBitCast(Ops
[1], HalfTy
);
11433 if (P
== llvm::FCmpInst::FCMP_OEQ
)
11434 Ops
[0] = Builder
.CreateFCmp(P
, Ops
[0], Ops
[1]);
11436 Ops
[0] = Builder
.CreateFCmpS(P
, Ops
[0], Ops
[1]);
11437 return Builder
.CreateSExt(Ops
[0], Int16Ty
, "vcmpd");
11439 case NEON::BI__builtin_neon_vceqd_s64
:
11440 case NEON::BI__builtin_neon_vceqd_u64
:
11441 case NEON::BI__builtin_neon_vcgtd_s64
:
11442 case NEON::BI__builtin_neon_vcgtd_u64
:
11443 case NEON::BI__builtin_neon_vcltd_s64
:
11444 case NEON::BI__builtin_neon_vcltd_u64
:
11445 case NEON::BI__builtin_neon_vcged_u64
:
11446 case NEON::BI__builtin_neon_vcged_s64
:
11447 case NEON::BI__builtin_neon_vcled_u64
:
11448 case NEON::BI__builtin_neon_vcled_s64
: {
11449 llvm::CmpInst::Predicate P
;
11450 switch (BuiltinID
) {
11451 default: llvm_unreachable("missing builtin ID in switch!");
11452 case NEON::BI__builtin_neon_vceqd_s64
:
11453 case NEON::BI__builtin_neon_vceqd_u64
:P
= llvm::ICmpInst::ICMP_EQ
;break;
11454 case NEON::BI__builtin_neon_vcgtd_s64
:P
= llvm::ICmpInst::ICMP_SGT
;break;
11455 case NEON::BI__builtin_neon_vcgtd_u64
:P
= llvm::ICmpInst::ICMP_UGT
;break;
11456 case NEON::BI__builtin_neon_vcltd_s64
:P
= llvm::ICmpInst::ICMP_SLT
;break;
11457 case NEON::BI__builtin_neon_vcltd_u64
:P
= llvm::ICmpInst::ICMP_ULT
;break;
11458 case NEON::BI__builtin_neon_vcged_u64
:P
= llvm::ICmpInst::ICMP_UGE
;break;
11459 case NEON::BI__builtin_neon_vcged_s64
:P
= llvm::ICmpInst::ICMP_SGE
;break;
11460 case NEON::BI__builtin_neon_vcled_u64
:P
= llvm::ICmpInst::ICMP_ULE
;break;
11461 case NEON::BI__builtin_neon_vcled_s64
:P
= llvm::ICmpInst::ICMP_SLE
;break;
11463 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11464 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Int64Ty
);
11465 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Int64Ty
);
11466 Ops
[0] = Builder
.CreateICmp(P
, Ops
[0], Ops
[1]);
11467 return Builder
.CreateSExt(Ops
[0], Int64Ty
, "vceqd");
11469 case NEON::BI__builtin_neon_vtstd_s64
:
11470 case NEON::BI__builtin_neon_vtstd_u64
: {
11471 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11472 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Int64Ty
);
11473 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Int64Ty
);
11474 Ops
[0] = Builder
.CreateAnd(Ops
[0], Ops
[1]);
11475 Ops
[0] = Builder
.CreateICmp(ICmpInst::ICMP_NE
, Ops
[0],
11476 llvm::Constant::getNullValue(Int64Ty
));
11477 return Builder
.CreateSExt(Ops
[0], Int64Ty
, "vtstd");
11479 case NEON::BI__builtin_neon_vset_lane_i8
:
11480 case NEON::BI__builtin_neon_vset_lane_i16
:
11481 case NEON::BI__builtin_neon_vset_lane_i32
:
11482 case NEON::BI__builtin_neon_vset_lane_i64
:
11483 case NEON::BI__builtin_neon_vset_lane_bf16
:
11484 case NEON::BI__builtin_neon_vset_lane_f32
:
11485 case NEON::BI__builtin_neon_vsetq_lane_i8
:
11486 case NEON::BI__builtin_neon_vsetq_lane_i16
:
11487 case NEON::BI__builtin_neon_vsetq_lane_i32
:
11488 case NEON::BI__builtin_neon_vsetq_lane_i64
:
11489 case NEON::BI__builtin_neon_vsetq_lane_bf16
:
11490 case NEON::BI__builtin_neon_vsetq_lane_f32
:
11491 Ops
.push_back(EmitScalarExpr(E
->getArg(2)));
11492 return Builder
.CreateInsertElement(Ops
[1], Ops
[0], Ops
[2], "vset_lane");
11493 case NEON::BI__builtin_neon_vset_lane_f64
:
11494 // The vector type needs a cast for the v1f64 variant.
11496 Builder
.CreateBitCast(Ops
[1], llvm::FixedVectorType::get(DoubleTy
, 1));
11497 Ops
.push_back(EmitScalarExpr(E
->getArg(2)));
11498 return Builder
.CreateInsertElement(Ops
[1], Ops
[0], Ops
[2], "vset_lane");
11499 case NEON::BI__builtin_neon_vsetq_lane_f64
:
11500 // The vector type needs a cast for the v2f64 variant.
11502 Builder
.CreateBitCast(Ops
[1], llvm::FixedVectorType::get(DoubleTy
, 2));
11503 Ops
.push_back(EmitScalarExpr(E
->getArg(2)));
11504 return Builder
.CreateInsertElement(Ops
[1], Ops
[0], Ops
[2], "vset_lane");
11506 case NEON::BI__builtin_neon_vget_lane_i8
:
11507 case NEON::BI__builtin_neon_vdupb_lane_i8
:
11509 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int8Ty
, 8));
11510 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11512 case NEON::BI__builtin_neon_vgetq_lane_i8
:
11513 case NEON::BI__builtin_neon_vdupb_laneq_i8
:
11515 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int8Ty
, 16));
11516 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11518 case NEON::BI__builtin_neon_vget_lane_i16
:
11519 case NEON::BI__builtin_neon_vduph_lane_i16
:
11521 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int16Ty
, 4));
11522 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11524 case NEON::BI__builtin_neon_vgetq_lane_i16
:
11525 case NEON::BI__builtin_neon_vduph_laneq_i16
:
11527 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int16Ty
, 8));
11528 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11530 case NEON::BI__builtin_neon_vget_lane_i32
:
11531 case NEON::BI__builtin_neon_vdups_lane_i32
:
11533 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int32Ty
, 2));
11534 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11536 case NEON::BI__builtin_neon_vdups_lane_f32
:
11538 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(FloatTy
, 2));
11539 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11541 case NEON::BI__builtin_neon_vgetq_lane_i32
:
11542 case NEON::BI__builtin_neon_vdups_laneq_i32
:
11544 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int32Ty
, 4));
11545 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11547 case NEON::BI__builtin_neon_vget_lane_i64
:
11548 case NEON::BI__builtin_neon_vdupd_lane_i64
:
11550 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int64Ty
, 1));
11551 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11553 case NEON::BI__builtin_neon_vdupd_lane_f64
:
11555 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(DoubleTy
, 1));
11556 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11558 case NEON::BI__builtin_neon_vgetq_lane_i64
:
11559 case NEON::BI__builtin_neon_vdupd_laneq_i64
:
11561 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(Int64Ty
, 2));
11562 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11564 case NEON::BI__builtin_neon_vget_lane_f32
:
11566 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(FloatTy
, 2));
11567 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11569 case NEON::BI__builtin_neon_vget_lane_f64
:
11571 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(DoubleTy
, 1));
11572 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11574 case NEON::BI__builtin_neon_vgetq_lane_f32
:
11575 case NEON::BI__builtin_neon_vdups_laneq_f32
:
11577 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(FloatTy
, 4));
11578 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11580 case NEON::BI__builtin_neon_vgetq_lane_f64
:
11581 case NEON::BI__builtin_neon_vdupd_laneq_f64
:
11583 Builder
.CreateBitCast(Ops
[0], llvm::FixedVectorType::get(DoubleTy
, 2));
11584 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11586 case NEON::BI__builtin_neon_vaddh_f16
:
11587 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11588 return Builder
.CreateFAdd(Ops
[0], Ops
[1], "vaddh");
11589 case NEON::BI__builtin_neon_vsubh_f16
:
11590 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11591 return Builder
.CreateFSub(Ops
[0], Ops
[1], "vsubh");
11592 case NEON::BI__builtin_neon_vmulh_f16
:
11593 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11594 return Builder
.CreateFMul(Ops
[0], Ops
[1], "vmulh");
11595 case NEON::BI__builtin_neon_vdivh_f16
:
11596 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11597 return Builder
.CreateFDiv(Ops
[0], Ops
[1], "vdivh");
11598 case NEON::BI__builtin_neon_vfmah_f16
:
11599 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11600 return emitCallMaybeConstrainedFPBuiltin(
11601 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
, HalfTy
,
11602 {EmitScalarExpr(E
->getArg(1)), EmitScalarExpr(E
->getArg(2)), Ops
[0]});
11603 case NEON::BI__builtin_neon_vfmsh_f16
: {
11604 Value
* Neg
= Builder
.CreateFNeg(EmitScalarExpr(E
->getArg(1)), "vsubh");
11606 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11607 return emitCallMaybeConstrainedFPBuiltin(
11608 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
, HalfTy
,
11609 {Neg
, EmitScalarExpr(E
->getArg(2)), Ops
[0]});
11611 case NEON::BI__builtin_neon_vaddd_s64
:
11612 case NEON::BI__builtin_neon_vaddd_u64
:
11613 return Builder
.CreateAdd(Ops
[0], EmitScalarExpr(E
->getArg(1)), "vaddd");
11614 case NEON::BI__builtin_neon_vsubd_s64
:
11615 case NEON::BI__builtin_neon_vsubd_u64
:
11616 return Builder
.CreateSub(Ops
[0], EmitScalarExpr(E
->getArg(1)), "vsubd");
11617 case NEON::BI__builtin_neon_vqdmlalh_s16
:
11618 case NEON::BI__builtin_neon_vqdmlslh_s16
: {
11619 SmallVector
<Value
*, 2> ProductOps
;
11620 ProductOps
.push_back(vectorWrapScalar16(Ops
[1]));
11621 ProductOps
.push_back(vectorWrapScalar16(EmitScalarExpr(E
->getArg(2))));
11622 auto *VTy
= llvm::FixedVectorType::get(Int32Ty
, 4);
11623 Ops
[1] = EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_sqdmull
, VTy
),
11624 ProductOps
, "vqdmlXl");
11625 Constant
*CI
= ConstantInt::get(SizeTy
, 0);
11626 Ops
[1] = Builder
.CreateExtractElement(Ops
[1], CI
, "lane0");
11628 unsigned AccumInt
= BuiltinID
== NEON::BI__builtin_neon_vqdmlalh_s16
11629 ? Intrinsic::aarch64_neon_sqadd
11630 : Intrinsic::aarch64_neon_sqsub
;
11631 return EmitNeonCall(CGM
.getIntrinsic(AccumInt
, Int32Ty
), Ops
, "vqdmlXl");
11633 case NEON::BI__builtin_neon_vqshlud_n_s64
: {
11634 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11635 Ops
[1] = Builder
.CreateZExt(Ops
[1], Int64Ty
);
11636 return EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_sqshlu
, Int64Ty
),
11639 case NEON::BI__builtin_neon_vqshld_n_u64
:
11640 case NEON::BI__builtin_neon_vqshld_n_s64
: {
11641 unsigned Int
= BuiltinID
== NEON::BI__builtin_neon_vqshld_n_u64
11642 ? Intrinsic::aarch64_neon_uqshl
11643 : Intrinsic::aarch64_neon_sqshl
;
11644 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11645 Ops
[1] = Builder
.CreateZExt(Ops
[1], Int64Ty
);
11646 return EmitNeonCall(CGM
.getIntrinsic(Int
, Int64Ty
), Ops
, "vqshl_n");
11648 case NEON::BI__builtin_neon_vrshrd_n_u64
:
11649 case NEON::BI__builtin_neon_vrshrd_n_s64
: {
11650 unsigned Int
= BuiltinID
== NEON::BI__builtin_neon_vrshrd_n_u64
11651 ? Intrinsic::aarch64_neon_urshl
11652 : Intrinsic::aarch64_neon_srshl
;
11653 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11654 int SV
= cast
<ConstantInt
>(Ops
[1])->getSExtValue();
11655 Ops
[1] = ConstantInt::get(Int64Ty
, -SV
);
11656 return EmitNeonCall(CGM
.getIntrinsic(Int
, Int64Ty
), Ops
, "vrshr_n");
11658 case NEON::BI__builtin_neon_vrsrad_n_u64
:
11659 case NEON::BI__builtin_neon_vrsrad_n_s64
: {
11660 unsigned Int
= BuiltinID
== NEON::BI__builtin_neon_vrsrad_n_u64
11661 ? Intrinsic::aarch64_neon_urshl
11662 : Intrinsic::aarch64_neon_srshl
;
11663 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Int64Ty
);
11664 Ops
.push_back(Builder
.CreateNeg(EmitScalarExpr(E
->getArg(2))));
11665 Ops
[1] = Builder
.CreateCall(CGM
.getIntrinsic(Int
, Int64Ty
),
11666 {Ops
[1], Builder
.CreateSExt(Ops
[2], Int64Ty
)});
11667 return Builder
.CreateAdd(Ops
[0], Builder
.CreateBitCast(Ops
[1], Int64Ty
));
11669 case NEON::BI__builtin_neon_vshld_n_s64
:
11670 case NEON::BI__builtin_neon_vshld_n_u64
: {
11671 llvm::ConstantInt
*Amt
= cast
<ConstantInt
>(EmitScalarExpr(E
->getArg(1)));
11672 return Builder
.CreateShl(
11673 Ops
[0], ConstantInt::get(Int64Ty
, Amt
->getZExtValue()), "shld_n");
11675 case NEON::BI__builtin_neon_vshrd_n_s64
: {
11676 llvm::ConstantInt
*Amt
= cast
<ConstantInt
>(EmitScalarExpr(E
->getArg(1)));
11677 return Builder
.CreateAShr(
11678 Ops
[0], ConstantInt::get(Int64Ty
, std::min(static_cast<uint64_t>(63),
11679 Amt
->getZExtValue())),
11682 case NEON::BI__builtin_neon_vshrd_n_u64
: {
11683 llvm::ConstantInt
*Amt
= cast
<ConstantInt
>(EmitScalarExpr(E
->getArg(1)));
11684 uint64_t ShiftAmt
= Amt
->getZExtValue();
11685 // Right-shifting an unsigned value by its size yields 0.
11686 if (ShiftAmt
== 64)
11687 return ConstantInt::get(Int64Ty
, 0);
11688 return Builder
.CreateLShr(Ops
[0], ConstantInt::get(Int64Ty
, ShiftAmt
),
11691 case NEON::BI__builtin_neon_vsrad_n_s64
: {
11692 llvm::ConstantInt
*Amt
= cast
<ConstantInt
>(EmitScalarExpr(E
->getArg(2)));
11693 Ops
[1] = Builder
.CreateAShr(
11694 Ops
[1], ConstantInt::get(Int64Ty
, std::min(static_cast<uint64_t>(63),
11695 Amt
->getZExtValue())),
11697 return Builder
.CreateAdd(Ops
[0], Ops
[1]);
11699 case NEON::BI__builtin_neon_vsrad_n_u64
: {
11700 llvm::ConstantInt
*Amt
= cast
<ConstantInt
>(EmitScalarExpr(E
->getArg(2)));
11701 uint64_t ShiftAmt
= Amt
->getZExtValue();
11702 // Right-shifting an unsigned value by its size yields 0.
11703 // As Op + 0 = Op, return Ops[0] directly.
11704 if (ShiftAmt
== 64)
11706 Ops
[1] = Builder
.CreateLShr(Ops
[1], ConstantInt::get(Int64Ty
, ShiftAmt
),
11708 return Builder
.CreateAdd(Ops
[0], Ops
[1]);
11710 case NEON::BI__builtin_neon_vqdmlalh_lane_s16
:
11711 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16
:
11712 case NEON::BI__builtin_neon_vqdmlslh_lane_s16
:
11713 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16
: {
11714 Ops
[2] = Builder
.CreateExtractElement(Ops
[2], EmitScalarExpr(E
->getArg(3)),
11716 SmallVector
<Value
*, 2> ProductOps
;
11717 ProductOps
.push_back(vectorWrapScalar16(Ops
[1]));
11718 ProductOps
.push_back(vectorWrapScalar16(Ops
[2]));
11719 auto *VTy
= llvm::FixedVectorType::get(Int32Ty
, 4);
11720 Ops
[1] = EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_sqdmull
, VTy
),
11721 ProductOps
, "vqdmlXl");
11722 Constant
*CI
= ConstantInt::get(SizeTy
, 0);
11723 Ops
[1] = Builder
.CreateExtractElement(Ops
[1], CI
, "lane0");
11726 unsigned AccInt
= (BuiltinID
== NEON::BI__builtin_neon_vqdmlalh_lane_s16
||
11727 BuiltinID
== NEON::BI__builtin_neon_vqdmlalh_laneq_s16
)
11728 ? Intrinsic::aarch64_neon_sqadd
11729 : Intrinsic::aarch64_neon_sqsub
;
11730 return EmitNeonCall(CGM
.getIntrinsic(AccInt
, Int32Ty
), Ops
, "vqdmlXl");
11732 case NEON::BI__builtin_neon_vqdmlals_s32
:
11733 case NEON::BI__builtin_neon_vqdmlsls_s32
: {
11734 SmallVector
<Value
*, 2> ProductOps
;
11735 ProductOps
.push_back(Ops
[1]);
11736 ProductOps
.push_back(EmitScalarExpr(E
->getArg(2)));
11738 EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar
),
11739 ProductOps
, "vqdmlXl");
11741 unsigned AccumInt
= BuiltinID
== NEON::BI__builtin_neon_vqdmlals_s32
11742 ? Intrinsic::aarch64_neon_sqadd
11743 : Intrinsic::aarch64_neon_sqsub
;
11744 return EmitNeonCall(CGM
.getIntrinsic(AccumInt
, Int64Ty
), Ops
, "vqdmlXl");
11746 case NEON::BI__builtin_neon_vqdmlals_lane_s32
:
11747 case NEON::BI__builtin_neon_vqdmlals_laneq_s32
:
11748 case NEON::BI__builtin_neon_vqdmlsls_lane_s32
:
11749 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32
: {
11750 Ops
[2] = Builder
.CreateExtractElement(Ops
[2], EmitScalarExpr(E
->getArg(3)),
11752 SmallVector
<Value
*, 2> ProductOps
;
11753 ProductOps
.push_back(Ops
[1]);
11754 ProductOps
.push_back(Ops
[2]);
11756 EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar
),
11757 ProductOps
, "vqdmlXl");
11760 unsigned AccInt
= (BuiltinID
== NEON::BI__builtin_neon_vqdmlals_lane_s32
||
11761 BuiltinID
== NEON::BI__builtin_neon_vqdmlals_laneq_s32
)
11762 ? Intrinsic::aarch64_neon_sqadd
11763 : Intrinsic::aarch64_neon_sqsub
;
11764 return EmitNeonCall(CGM
.getIntrinsic(AccInt
, Int64Ty
), Ops
, "vqdmlXl");
11766 case NEON::BI__builtin_neon_vget_lane_bf16
:
11767 case NEON::BI__builtin_neon_vduph_lane_bf16
:
11768 case NEON::BI__builtin_neon_vduph_lane_f16
: {
11769 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11772 case NEON::BI__builtin_neon_vgetq_lane_bf16
:
11773 case NEON::BI__builtin_neon_vduph_laneq_bf16
:
11774 case NEON::BI__builtin_neon_vduph_laneq_f16
: {
11775 return Builder
.CreateExtractElement(Ops
[0], EmitScalarExpr(E
->getArg(1)),
11779 case clang::AArch64::BI_InterlockedAdd
: {
11780 Value
*Arg0
= EmitScalarExpr(E
->getArg(0));
11781 Value
*Arg1
= EmitScalarExpr(E
->getArg(1));
11782 AtomicRMWInst
*RMWI
= Builder
.CreateAtomicRMW(
11783 AtomicRMWInst::Add
, Arg0
, Arg1
,
11784 llvm::AtomicOrdering::SequentiallyConsistent
);
11785 return Builder
.CreateAdd(RMWI
, Arg1
);
11789 llvm::FixedVectorType
*VTy
= GetNeonType(this, Type
);
11790 llvm::Type
*Ty
= VTy
;
11794 // Not all intrinsics handled by the common case work for AArch64 yet, so only
11795 // defer to common code if it's been added to our special map.
11796 Builtin
= findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap
, BuiltinID
,
11797 AArch64SIMDIntrinsicsProvenSorted
);
11800 return EmitCommonNeonBuiltinExpr(
11801 Builtin
->BuiltinID
, Builtin
->LLVMIntrinsic
, Builtin
->AltLLVMIntrinsic
,
11802 Builtin
->NameHint
, Builtin
->TypeModifier
, E
, Ops
,
11803 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch
);
11805 if (Value
*V
= EmitAArch64TblBuiltinExpr(*this, BuiltinID
, E
, Ops
, Arch
))
11809 switch (BuiltinID
) {
11810 default: return nullptr;
11811 case NEON::BI__builtin_neon_vbsl_v
:
11812 case NEON::BI__builtin_neon_vbslq_v
: {
11813 llvm::Type
*BitTy
= llvm::VectorType::getInteger(VTy
);
11814 Ops
[0] = Builder
.CreateBitCast(Ops
[0], BitTy
, "vbsl");
11815 Ops
[1] = Builder
.CreateBitCast(Ops
[1], BitTy
, "vbsl");
11816 Ops
[2] = Builder
.CreateBitCast(Ops
[2], BitTy
, "vbsl");
11818 Ops
[1] = Builder
.CreateAnd(Ops
[0], Ops
[1], "vbsl");
11819 Ops
[2] = Builder
.CreateAnd(Builder
.CreateNot(Ops
[0]), Ops
[2], "vbsl");
11820 Ops
[0] = Builder
.CreateOr(Ops
[1], Ops
[2], "vbsl");
11821 return Builder
.CreateBitCast(Ops
[0], Ty
);
11823 case NEON::BI__builtin_neon_vfma_lane_v
:
11824 case NEON::BI__builtin_neon_vfmaq_lane_v
: { // Only used for FP types
11825 // The ARM builtins (and instructions) have the addend as the first
11826 // operand, but the 'fma' intrinsics have it last. Swap it around here.
11827 Value
*Addend
= Ops
[0];
11828 Value
*Multiplicand
= Ops
[1];
11829 Value
*LaneSource
= Ops
[2];
11830 Ops
[0] = Multiplicand
;
11831 Ops
[1] = LaneSource
;
11834 // Now adjust things to handle the lane access.
11835 auto *SourceTy
= BuiltinID
== NEON::BI__builtin_neon_vfmaq_lane_v
11836 ? llvm::FixedVectorType::get(VTy
->getElementType(),
11837 VTy
->getNumElements() / 2)
11839 llvm::Constant
*cst
= cast
<Constant
>(Ops
[3]);
11840 Value
*SV
= llvm::ConstantVector::getSplat(VTy
->getElementCount(), cst
);
11841 Ops
[1] = Builder
.CreateBitCast(Ops
[1], SourceTy
);
11842 Ops
[1] = Builder
.CreateShuffleVector(Ops
[1], Ops
[1], SV
, "lane");
11845 Int
= Builder
.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
11847 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "fmla");
11849 case NEON::BI__builtin_neon_vfma_laneq_v
: {
11850 auto *VTy
= cast
<llvm::FixedVectorType
>(Ty
);
11851 // v1f64 fma should be mapped to Neon scalar f64 fma
11852 if (VTy
&& VTy
->getElementType() == DoubleTy
) {
11853 Ops
[0] = Builder
.CreateBitCast(Ops
[0], DoubleTy
);
11854 Ops
[1] = Builder
.CreateBitCast(Ops
[1], DoubleTy
);
11855 llvm::FixedVectorType
*VTy
=
11856 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64
, false, true));
11857 Ops
[2] = Builder
.CreateBitCast(Ops
[2], VTy
);
11858 Ops
[2] = Builder
.CreateExtractElement(Ops
[2], Ops
[3], "extract");
11860 Result
= emitCallMaybeConstrainedFPBuiltin(
11861 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
,
11862 DoubleTy
, {Ops
[1], Ops
[2], Ops
[0]});
11863 return Builder
.CreateBitCast(Result
, Ty
);
11865 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Ty
);
11866 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Ty
);
11868 auto *STy
= llvm::FixedVectorType::get(VTy
->getElementType(),
11869 VTy
->getNumElements() * 2);
11870 Ops
[2] = Builder
.CreateBitCast(Ops
[2], STy
);
11871 Value
*SV
= llvm::ConstantVector::getSplat(VTy
->getElementCount(),
11872 cast
<ConstantInt
>(Ops
[3]));
11873 Ops
[2] = Builder
.CreateShuffleVector(Ops
[2], Ops
[2], SV
, "lane");
11875 return emitCallMaybeConstrainedFPBuiltin(
11876 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
, Ty
,
11877 {Ops
[2], Ops
[1], Ops
[0]});
11879 case NEON::BI__builtin_neon_vfmaq_laneq_v
: {
11880 Ops
[0] = Builder
.CreateBitCast(Ops
[0], Ty
);
11881 Ops
[1] = Builder
.CreateBitCast(Ops
[1], Ty
);
11883 Ops
[2] = Builder
.CreateBitCast(Ops
[2], Ty
);
11884 Ops
[2] = EmitNeonSplat(Ops
[2], cast
<ConstantInt
>(Ops
[3]));
11885 return emitCallMaybeConstrainedFPBuiltin(
11886 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
, Ty
,
11887 {Ops
[2], Ops
[1], Ops
[0]});
11889 case NEON::BI__builtin_neon_vfmah_lane_f16
:
11890 case NEON::BI__builtin_neon_vfmas_lane_f32
:
11891 case NEON::BI__builtin_neon_vfmah_laneq_f16
:
11892 case NEON::BI__builtin_neon_vfmas_laneq_f32
:
11893 case NEON::BI__builtin_neon_vfmad_lane_f64
:
11894 case NEON::BI__builtin_neon_vfmad_laneq_f64
: {
11895 Ops
.push_back(EmitScalarExpr(E
->getArg(3)));
11896 llvm::Type
*Ty
= ConvertType(E
->getCallReturnType(getContext()));
11897 Ops
[2] = Builder
.CreateExtractElement(Ops
[2], Ops
[3], "extract");
11898 return emitCallMaybeConstrainedFPBuiltin(
11899 *this, Intrinsic::fma
, Intrinsic::experimental_constrained_fma
, Ty
,
11900 {Ops
[1], Ops
[2], Ops
[0]});
11902 case NEON::BI__builtin_neon_vmull_v
:
11903 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11904 Int
= usgn
? Intrinsic::aarch64_neon_umull
: Intrinsic::aarch64_neon_smull
;
11905 if (Type
.isPoly()) Int
= Intrinsic::aarch64_neon_pmull
;
11906 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vmull");
11907 case NEON::BI__builtin_neon_vmax_v
:
11908 case NEON::BI__builtin_neon_vmaxq_v
:
11909 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11910 Int
= usgn
? Intrinsic::aarch64_neon_umax
: Intrinsic::aarch64_neon_smax
;
11911 if (Ty
->isFPOrFPVectorTy()) Int
= Intrinsic::aarch64_neon_fmax
;
11912 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vmax");
11913 case NEON::BI__builtin_neon_vmaxh_f16
: {
11914 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11915 Int
= Intrinsic::aarch64_neon_fmax
;
11916 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vmax");
11918 case NEON::BI__builtin_neon_vmin_v
:
11919 case NEON::BI__builtin_neon_vminq_v
:
11920 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11921 Int
= usgn
? Intrinsic::aarch64_neon_umin
: Intrinsic::aarch64_neon_smin
;
11922 if (Ty
->isFPOrFPVectorTy()) Int
= Intrinsic::aarch64_neon_fmin
;
11923 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vmin");
11924 case NEON::BI__builtin_neon_vminh_f16
: {
11925 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11926 Int
= Intrinsic::aarch64_neon_fmin
;
11927 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vmin");
11929 case NEON::BI__builtin_neon_vabd_v
:
11930 case NEON::BI__builtin_neon_vabdq_v
:
11931 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11932 Int
= usgn
? Intrinsic::aarch64_neon_uabd
: Intrinsic::aarch64_neon_sabd
;
11933 if (Ty
->isFPOrFPVectorTy()) Int
= Intrinsic::aarch64_neon_fabd
;
11934 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vabd");
11935 case NEON::BI__builtin_neon_vpadal_v
:
11936 case NEON::BI__builtin_neon_vpadalq_v
: {
11937 unsigned ArgElts
= VTy
->getNumElements();
11938 llvm::IntegerType
*EltTy
= cast
<IntegerType
>(VTy
->getElementType());
11939 unsigned BitWidth
= EltTy
->getBitWidth();
11940 auto *ArgTy
= llvm::FixedVectorType::get(
11941 llvm::IntegerType::get(getLLVMContext(), BitWidth
/ 2), 2 * ArgElts
);
11942 llvm::Type
* Tys
[2] = { VTy
, ArgTy
};
11943 Int
= usgn
? Intrinsic::aarch64_neon_uaddlp
: Intrinsic::aarch64_neon_saddlp
;
11944 SmallVector
<llvm::Value
*, 1> TmpOps
;
11945 TmpOps
.push_back(Ops
[1]);
11946 Function
*F
= CGM
.getIntrinsic(Int
, Tys
);
11947 llvm::Value
*tmp
= EmitNeonCall(F
, TmpOps
, "vpadal");
11948 llvm::Value
*addend
= Builder
.CreateBitCast(Ops
[0], tmp
->getType());
11949 return Builder
.CreateAdd(tmp
, addend
);
11951 case NEON::BI__builtin_neon_vpmin_v
:
11952 case NEON::BI__builtin_neon_vpminq_v
:
11953 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11954 Int
= usgn
? Intrinsic::aarch64_neon_uminp
: Intrinsic::aarch64_neon_sminp
;
11955 if (Ty
->isFPOrFPVectorTy()) Int
= Intrinsic::aarch64_neon_fminp
;
11956 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vpmin");
11957 case NEON::BI__builtin_neon_vpmax_v
:
11958 case NEON::BI__builtin_neon_vpmaxq_v
:
11959 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11960 Int
= usgn
? Intrinsic::aarch64_neon_umaxp
: Intrinsic::aarch64_neon_smaxp
;
11961 if (Ty
->isFPOrFPVectorTy()) Int
= Intrinsic::aarch64_neon_fmaxp
;
11962 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vpmax");
11963 case NEON::BI__builtin_neon_vminnm_v
:
11964 case NEON::BI__builtin_neon_vminnmq_v
:
11965 Int
= Intrinsic::aarch64_neon_fminnm
;
11966 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vminnm");
11967 case NEON::BI__builtin_neon_vminnmh_f16
:
11968 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11969 Int
= Intrinsic::aarch64_neon_fminnm
;
11970 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vminnm");
11971 case NEON::BI__builtin_neon_vmaxnm_v
:
11972 case NEON::BI__builtin_neon_vmaxnmq_v
:
11973 Int
= Intrinsic::aarch64_neon_fmaxnm
;
11974 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vmaxnm");
11975 case NEON::BI__builtin_neon_vmaxnmh_f16
:
11976 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11977 Int
= Intrinsic::aarch64_neon_fmaxnm
;
11978 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vmaxnm");
11979 case NEON::BI__builtin_neon_vrecpss_f32
: {
11980 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11981 return EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_frecps
, FloatTy
),
11984 case NEON::BI__builtin_neon_vrecpsd_f64
:
11985 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11986 return EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_frecps
, DoubleTy
),
11988 case NEON::BI__builtin_neon_vrecpsh_f16
:
11989 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
11990 return EmitNeonCall(CGM
.getIntrinsic(Intrinsic::aarch64_neon_frecps
, HalfTy
),
11992 case NEON::BI__builtin_neon_vqshrun_n_v
:
11993 Int
= Intrinsic::aarch64_neon_sqshrun
;
11994 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vqshrun_n");
11995 case NEON::BI__builtin_neon_vqrshrun_n_v
:
11996 Int
= Intrinsic::aarch64_neon_sqrshrun
;
11997 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vqrshrun_n");
11998 case NEON::BI__builtin_neon_vqshrn_n_v
:
11999 Int
= usgn
? Intrinsic::aarch64_neon_uqshrn
: Intrinsic::aarch64_neon_sqshrn
;
12000 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vqshrn_n");
12001 case NEON::BI__builtin_neon_vrshrn_n_v
:
12002 Int
= Intrinsic::aarch64_neon_rshrn
;
12003 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrshrn_n");
12004 case NEON::BI__builtin_neon_vqrshrn_n_v
:
12005 Int
= usgn
? Intrinsic::aarch64_neon_uqrshrn
: Intrinsic::aarch64_neon_sqrshrn
;
12006 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vqrshrn_n");
12007 case NEON::BI__builtin_neon_vrndah_f16
: {
12008 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12009 Int
= Builder
.getIsFPConstrained()
12010 ? Intrinsic::experimental_constrained_round
12011 : Intrinsic::round
;
12012 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrnda");
12014 case NEON::BI__builtin_neon_vrnda_v
:
12015 case NEON::BI__builtin_neon_vrndaq_v
: {
12016 Int
= Builder
.getIsFPConstrained()
12017 ? Intrinsic::experimental_constrained_round
12018 : Intrinsic::round
;
12019 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrnda");
12021 case NEON::BI__builtin_neon_vrndih_f16
: {
12022 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12023 Int
= Builder
.getIsFPConstrained()
12024 ? Intrinsic::experimental_constrained_nearbyint
12025 : Intrinsic::nearbyint
;
12026 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndi");
12028 case NEON::BI__builtin_neon_vrndmh_f16
: {
12029 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12030 Int
= Builder
.getIsFPConstrained()
12031 ? Intrinsic::experimental_constrained_floor
12032 : Intrinsic::floor
;
12033 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndm");
12035 case NEON::BI__builtin_neon_vrndm_v
:
12036 case NEON::BI__builtin_neon_vrndmq_v
: {
12037 Int
= Builder
.getIsFPConstrained()
12038 ? Intrinsic::experimental_constrained_floor
12039 : Intrinsic::floor
;
12040 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrndm");
12042 case NEON::BI__builtin_neon_vrndnh_f16
: {
12043 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12044 Int
= Builder
.getIsFPConstrained()
12045 ? Intrinsic::experimental_constrained_roundeven
12046 : Intrinsic::roundeven
;
12047 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndn");
12049 case NEON::BI__builtin_neon_vrndn_v
:
12050 case NEON::BI__builtin_neon_vrndnq_v
: {
12051 Int
= Builder
.getIsFPConstrained()
12052 ? Intrinsic::experimental_constrained_roundeven
12053 : Intrinsic::roundeven
;
12054 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrndn");
12056 case NEON::BI__builtin_neon_vrndns_f32
: {
12057 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12058 Int
= Builder
.getIsFPConstrained()
12059 ? Intrinsic::experimental_constrained_roundeven
12060 : Intrinsic::roundeven
;
12061 return EmitNeonCall(CGM
.getIntrinsic(Int
, FloatTy
), Ops
, "vrndn");
12063 case NEON::BI__builtin_neon_vrndph_f16
: {
12064 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12065 Int
= Builder
.getIsFPConstrained()
12066 ? Intrinsic::experimental_constrained_ceil
12068 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndp");
12070 case NEON::BI__builtin_neon_vrndp_v
:
12071 case NEON::BI__builtin_neon_vrndpq_v
: {
12072 Int
= Builder
.getIsFPConstrained()
12073 ? Intrinsic::experimental_constrained_ceil
12075 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrndp");
12077 case NEON::BI__builtin_neon_vrndxh_f16
: {
12078 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12079 Int
= Builder
.getIsFPConstrained()
12080 ? Intrinsic::experimental_constrained_rint
12082 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndx");
12084 case NEON::BI__builtin_neon_vrndx_v
:
12085 case NEON::BI__builtin_neon_vrndxq_v
: {
12086 Int
= Builder
.getIsFPConstrained()
12087 ? Intrinsic::experimental_constrained_rint
12089 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrndx");
12091 case NEON::BI__builtin_neon_vrndh_f16
: {
12092 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12093 Int
= Builder
.getIsFPConstrained()
12094 ? Intrinsic::experimental_constrained_trunc
12095 : Intrinsic::trunc
;
12096 return EmitNeonCall(CGM
.getIntrinsic(Int
, HalfTy
), Ops
, "vrndz");
12098 case NEON::BI__builtin_neon_vrnd32x_f32
:
12099 case NEON::BI__builtin_neon_vrnd32xq_f32
:
12100 case NEON::BI__builtin_neon_vrnd32x_f64
:
12101 case NEON::BI__builtin_neon_vrnd32xq_f64
: {
12102 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12103 Int
= Intrinsic::aarch64_neon_frint32x
;
12104 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrnd32x");
12106 case NEON::BI__builtin_neon_vrnd32z_f32
:
12107 case NEON::BI__builtin_neon_vrnd32zq_f32
:
12108 case NEON::BI__builtin_neon_vrnd32z_f64
:
12109 case NEON::BI__builtin_neon_vrnd32zq_f64
: {
12110 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12111 Int
= Intrinsic::aarch64_neon_frint32z
;
12112 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrnd32z");
12114 case NEON::BI__builtin_neon_vrnd64x_f32
:
12115 case NEON::BI__builtin_neon_vrnd64xq_f32
:
12116 case NEON::BI__builtin_neon_vrnd64x_f64
:
12117 case NEON::BI__builtin_neon_vrnd64xq_f64
: {
12118 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12119 Int
= Intrinsic::aarch64_neon_frint64x
;
12120 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrnd64x");
12122 case NEON::BI__builtin_neon_vrnd64z_f32
:
12123 case NEON::BI__builtin_neon_vrnd64zq_f32
:
12124 case NEON::BI__builtin_neon_vrnd64z_f64
:
12125 case NEON::BI__builtin_neon_vrnd64zq_f64
: {
12126 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
12127 Int
= Intrinsic::aarch64_neon_frint64z
;
12128 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrnd64z");
12130 case NEON::BI__builtin_neon_vrnd_v
:
12131 case NEON::BI__builtin_neon_vrndq_v
: {
12132 Int
= Builder
.getIsFPConstrained()
12133 ? Intrinsic::experimental_constrained_trunc
12134 : Intrinsic::trunc
;
12135 return EmitNeonCall(CGM
.getIntrinsic(Int
, Ty
), Ops
, "vrndz");
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_f16:
  case NEON::BI__builtin_neon_vcvt_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_u16_f16: {
    Int =
        usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
    llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
  }
  case NEON::BI__builtin_neon_vcvta_s16_f16:
  case NEON::BI__builtin_neon_vcvta_u16_f16:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_f16:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_f16:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s16_f16:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_f16:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u16_f16:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_f16:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s16_f16:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_f16:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u16_f16:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_f16:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s16_f16:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_f16:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u16_f16:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_f16:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmulxh_lane_f16:
  case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane should be mapped to Neon scalar mulx after
    // extracting the scalar element
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Ops.pop_back();
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::FixedVectorType *VTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vnegh_f16:
    return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrth_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_sqrt
              : Intrinsic::sqrt;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_sqrt
              : Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::bitreverse;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
  case NEON::BI__builtin_neon_vaddv_u8:
    // FIXME: These are handled by the AArch64 scalar code.
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vaddv_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vaddv_u16:
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vaddv_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u8:
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vaddvq_s8: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vaddvq_u16:
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vaddvq_s16: {
    Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
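  // Illustrative note (added commentary, not from the original source): these
  // across-vector reductions always return an i32 from the
  // aarch64.neon.{u,s}addv / {u,s}maxv / {u,s}minv intrinsics, so the result
  // is truncated back to the element width, e.g. vaddv_u8 is roughly
  //   %s = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %v)
  //   %r = trunc i32 %s to i8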
  case NEON::BI__builtin_neon_vmaxv_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u8: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_u16: {
    Int = Intrinsic::aarch64_neon_umaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s8: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vmaxvq_s16: {
    Int = Intrinsic::aarch64_neon_smaxv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vmaxv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminv_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u8: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_u16: {
    Int = Intrinsic::aarch64_neon_uminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminv_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s8: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int8Ty);
  }
  case NEON::BI__builtin_neon_vminvq_s16: {
    Int = Intrinsic::aarch64_neon_sminv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vminv_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminvq_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmaxnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmv_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vminnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
    return Builder.CreateTrunc(Ops[0], HalfTy);
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value*,2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function* F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
  }
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
  case NEON::BI__builtin_neon_vld1_lane_v:
  case NEON::BI__builtin_neon_vld1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                       PtrOp0.getAlignment());
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vldap1_lane_s64:
  case NEON::BI__builtin_neon_vldap1q_lane_s64: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    llvm::LoadInst *LI = Builder.CreateAlignedLoad(
        VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
    LI->setAtomic(llvm::AtomicOrdering::Acquire);
    Ops[0] = LI;
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = PoisonValue::get(Ty);
    Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                       PtrOp0.getAlignment());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vst1_lane_v:
  case NEON::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
  case NEON::BI__builtin_neon_vstl1_lane_s64:
  case NEON::BI__builtin_neon_vstl1q_lane_s64: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    llvm::StoreInst *SI =
        Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
    SI->setAtomic(llvm::AtomicOrdering::Release);
    return SI;
  }
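  // Illustrative note (added commentary, not from the original source):
  // vldap1/vstl1 differ from the plain lane load/store cases above only in
  // that the scalar memory access is marked atomic with acquire (load) or
  // release (store) ordering, matching the LDAP1/STL1 instructions.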
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
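  // Illustrative note (added commentary, not from the original source):
  // vtrn/vuzp/vzip have no single intrinsic here; each half of the result is
  // built with a shufflevector and stored through the sret pointer in Ops[0].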
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
  }
}

Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  // A sequence number, injected into IR builtin functions, to
  // prevent CSE given the only difference of the function
  // may just be the debuginfo metadata.
  static uint32_t BuiltinSeqNum;

  switch (BuiltinID) {
  default:
    llvm_unreachable("Unexpected BPF builtin");
  case BPF::BI__builtin_preserve_field_info: {
    const Expr *Arg = E->getArg(0);
    bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;

    if (!getDebugInfo()) {
      CGM.Error(E->getExprLoc(),
                "using __builtin_preserve_field_info() without -g");
      return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                        : EmitLValue(Arg).getPointer(*this);
    }

    // Enable underlying preserve_*_access_index() generation.
    bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
    IsInPreservedAIRegion = true;
    Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
                                  : EmitLValue(Arg).getPointer(*this);
    IsInPreservedAIRegion = OldIsInPreservedAIRegion;

    ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());

    // Build the IR for the preserve_field_info intrinsic.
    llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
        {FieldAddr->getType()});
    return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
  }
  case BPF::BI__builtin_btf_type_id:
  case BPF::BI__builtin_preserve_type_info: {
    if (!getDebugInfo()) {
      CGM.Error(E->getExprLoc(), "using builtin function without -g");
      return nullptr;
    }

    const Expr *Arg0 = E->getArg(0);
    llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
        Arg0->getType(), Arg0->getExprLoc());

    ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
    Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);

    llvm::Function *FnDecl;
    if (BuiltinID == BPF::BI__builtin_btf_type_id)
      FnDecl = llvm::Intrinsic::getDeclaration(
          &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
    else
      FnDecl = llvm::Intrinsic::getDeclaration(
          &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
    CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
    return Fn;
  }
  case BPF::BI__builtin_preserve_enum_value: {
    if (!getDebugInfo()) {
      CGM.Error(E->getExprLoc(), "using builtin function without -g");
      return nullptr;
    }

    const Expr *Arg0 = E->getArg(0);
    llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
        Arg0->getType(), Arg0->getExprLoc());

    const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
    const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
    const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
    const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());

    auto &InitVal = Enumerator->getInitVal();
    std::string InitValStr;
    if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
      InitValStr = std::to_string(InitVal.getSExtValue());
    else
      InitValStr = std::to_string(InitVal.getZExtValue());
    std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
    Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);

    ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
    Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);

    llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
    CallInst *Fn =
        Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
    return Fn;
  }
  }
}
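
// Illustrative usage sketch (an assumption for illustration, not taken from
// this file): BPF CO-RE code typically reaches the lowering above through a
// call such as
//   unsigned off = __builtin_preserve_field_info(s->f, 0);
// which becomes a call to llvm.bpf.preserve.field.info carrying the field
// address and the info-kind constant.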

llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    SmallVector<llvm::Constant*, 16> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result = llvm::PoisonValue::get(
      llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt64(i));

  return Result;
}

// Convert the mask from an integer type to a vector of i1.
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
                              unsigned NumElts) {

  auto *MaskTy = llvm::FixedVectorType::get(
      CGF.Builder.getInt1Ty(),
      cast<IntegerType>(Mask->getType())->getBitWidth());
  Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    int Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    MaskVec = CGF.Builder.CreateShuffleVector(
        MaskVec, MaskVec, ArrayRef(Indices, NumElts), "extract");
  }

  return MaskVec;
}

static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Align Alignment) {
  Value *Ptr = Ops[0];

  Value *MaskVec = getMaskVecValue(
      CGF, Ops[2],
      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());

  return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}

static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                Align Alignment) {
  llvm::Type *Ty = Ops[1]->getType();
  Value *Ptr = Ops[0];

  Value *MaskVec = getMaskVecValue(
      CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());

  return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
}

static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops) {
  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
  Value *Ptr = Ops[0];

  Value *MaskVec = getMaskVecValue(
      CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}

static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops,
                                    bool IsCompress) {
  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());

  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());

  Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                 : Intrinsic::x86_avx512_mask_expand;
  llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
}

static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops) {
  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
  Value *Ptr = Ops[0];

  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());

  llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
                                           ResultTy);
  return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}

static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
                               ArrayRef<Value *> Ops,
                               bool InvertLHS = false) {
  unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
  Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
  Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);

  if (InvertLHS)
    LHS = CGF.Builder.CreateNot(LHS);

  return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
                                   Ops[0]->getType());
}

static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
                                 Value *Amt, bool IsRight) {
  llvm::Type *Ty = Op0->getType();

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
    Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
  }

  unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGF.CGM.getIntrinsic(IID, Ty);
  return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
}
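
// Illustrative note (added commentary, not from the original source): a vector
// funnel shift by an immediate therefore lowers to something like
//   call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                   <4 x i32> <i32 5, i32 5, i32 5, i32 5>)
// after the scalar shift amount has been splatted.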

static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                           bool IsSigned) {
  Value *Op0 = Ops[0];
  Value *Op1 = Ops[1];
  llvm::Type *Ty = Op0->getType();
  uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return llvm::Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return llvm::Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
  return Res;
}

static Value *EmitX86Select(CodeGenFunction &CGF,
                            Value *Mask, Value *Op0, Value *Op1) {

  // If the mask is all ones just return first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getMaskVecValue(
      CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());

  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
                                  Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones just return first argument.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  auto *MaskTy = llvm::FixedVectorType::get(
      CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
  Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
  Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
  return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
                                         unsigned NumElts, Value *MaskIn) {
  if (MaskIn) {
    const auto *C = dyn_cast<Constant>(MaskIn);
    if (!C || !C->isAllOnesValue())
      Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
  }

  if (NumElts < 8) {
    int Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = i % NumElts + NumElts;
    Cmp = CGF.Builder.CreateShuffleVector(
        Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
  }

  return CGF.Builder.CreateBitCast(Cmp,
                                   IntegerType::get(CGF.getLLVMContext(),
                                                    std::max(NumElts, 8U)));
}

static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
                                   bool Signed, ArrayRef<Value *> Ops) {
  assert((Ops.size() == 2 || Ops.size() == 4) &&
         "Unexpected number of arguments");
  unsigned NumElts =
      cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
  Value *Cmp;

  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ;  break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE;  break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  }

  Value *MaskIn = nullptr;
  if (Ops.size() == 4)
    MaskIn = Ops[3];

  return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
}

static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
  Value *Zero = Constant::getNullValue(In->getType());
  return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}

static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
                                    ArrayRef<Value *> Ops, bool IsSigned) {
  unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
  llvm::Type *Ty = Ops[1]->getType();

  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
                                 : Intrinsic::x86_avx512_uitofp_round;
    Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
    Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
  } else {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
                   : CGF.Builder.CreateUIToFP(Ops[0], Ty);
  }

  return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
}

// Lowers X86 FMA intrinsics to IR.
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
                             ArrayRef<Value *> Ops, unsigned BuiltinID,
                             bool IsAddSub) {

  bool Subtract = false;
  Intrinsic::ID IID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  default: break;
  case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
    IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
    break;
  case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
    IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
    break;
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
    break;
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
    Subtract = true;
    [[fallthrough]];
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
    break;
  }

  Value *A = Ops[0];
  Value *B = Ops[1];
  Value *C = Ops[2];

  if (Subtract)
    C = CGF.Builder.CreateFNeg(C);

  Value *Res;

  // Only handle in case of _MM_FROUND_CUR_DIRECTION/4 (no rounding).
  if (IID != Intrinsic::not_intrinsic &&
      (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
       IsAddSub)) {
    Function *Intr = CGF.CGM.getIntrinsic(IID);
    Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
  } else {
    llvm::Type *Ty = A->getType();
    Function *FMA;
    if (CGF.Builder.getIsFPConstrained()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
      FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
      Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
    } else {
      FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
      Res = CGF.Builder.CreateCall(FMA, {A, B, C});
    }
  }

  // Handle any required masking.
  Value *MaskFalseVal = nullptr;
  switch (BuiltinID) {
  case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
    MaskFalseVal = Ops[0];
    break;
  case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
    MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
    break;
  case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
    MaskFalseVal = Ops[2];
    break;
  }

  if (MaskFalseVal)
    return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);

  return Res;
}
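
// Illustrative note (added commentary, not from the original source): the
// _mask/_maskz/_mask3 builtin suffix determines what the masked-off lanes read
// back through EmitX86Select: the first source operand, zero, or the
// accumulator operand (Ops[2]) respectively.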

static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
                                MutableArrayRef<Value *> Ops, Value *Upper,
                                bool ZeroMask = false, unsigned PTIdx = 0,
                                bool NegAcc = false) {
  unsigned Rnd = 4;
  if (Ops.size() > 4)
    Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();

  if (NegAcc)
    Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);

  Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
  Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
  Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
  Value *Res;
  if (Rnd != 4) {
    Intrinsic::ID IID;

    switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
    case 16:
      IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
      break;
    case 32:
      IID = Intrinsic::x86_avx512_vfmadd_f32;
      break;
    case 64:
      IID = Intrinsic::x86_avx512_vfmadd_f64;
      break;
    default:
      llvm_unreachable("Unexpected size");
    }
    Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                 {Ops[0], Ops[1], Ops[2], Ops[4]});
  } else if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *FMA = CGF.CGM.getIntrinsic(
        Intrinsic::experimental_constrained_fma, Ops[0]->getType());
    Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
  } else {
    Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
    Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
  }
  // If we have more than 3 arguments, we need to do masking.
  if (Ops.size() > 3) {
    Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
                               : Ops[PTIdx];

    // If we negated the accumulator and it's the PassThru value we need to
    // bypass the negate. Conveniently Upper should be the same thing in this
    // case.
    if (NegAcc && PTIdx == 2)
      PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);

    Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
  }
  return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
}

static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
                           ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();
  // Arguments have a vXi32 type so cast to vXi64.
  Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
                                  Ty->getPrimitiveSizeInBits() / 64);
  Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
  Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
    LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
    RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
    RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = CGF.Builder.CreateAnd(LHS, Mask);
    RHS = CGF.Builder.CreateAnd(RHS, Mask);
  }

  return CGF.Builder.CreateMul(LHS, RHS);
}
// Emit a masked pternlog intrinsic. This only exists because the header has to
// use a macro and we aren't able to pass the input argument to a pternlog
// builtin and a select builtin without evaluating it twice.
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
                             ArrayRef<Value *> Ops) {
  llvm::Type *Ty = Ops[0]->getType();

  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_128;
  else if (VecWidth == 256 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_256;
  else if (VecWidth == 512 && EltWidth == 32)
    IID = Intrinsic::x86_avx512_pternlog_d_512;
  else if (VecWidth == 128 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_128;
  else if (VecWidth == 256 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_256;
  else if (VecWidth == 512 && EltWidth == 64)
    IID = Intrinsic::x86_avx512_pternlog_q_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
                                          Ops.drop_back());
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
  return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
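// Note: the masked pternlog builtins pass (a, b, c, truth-table immediate,
// mask); the intrinsic call above drops the trailing mask operand, and the
// mask is instead applied here via EmitX86Select, using either a zero vector
// or the first source as the pass-through value.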
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                              llvm::Type *DstTy) {
  unsigned NumberOfElements =
      cast<llvm::FixedVectorType>(DstTy)->getNumElements();
  Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
  return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
  const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
  return EmitX86CpuIs(CPUStr);
}
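// Note: the argument to __builtin_cpu_is (e.g. __builtin_cpu_is("amd")) is
// required to be a string literal and is validated before codegen, so the
// unchecked cast above cannot fail.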
// Convert F16 halves to floats.
static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
                                       ArrayRef<Value *> Ops,
                                       llvm::Type *DstTy) {
  assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
         "Unknown cvtph2ps intrinsic");

  // If the SAE intrinsic doesn't use default rounding then we can't upgrade.
  if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
    Function *F =
        CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
    return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
  }

  unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
  Value *Src = Ops[0];

  // Extract the subvector.
  if (NumDstElts !=
      cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
    assert(NumDstElts == 4 && "Unexpected vector size");
    Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
  }

  // Bitcast from vXi16 to vXf16.
  auto *HalfTy = llvm::FixedVectorType::get(
      llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
  Src = CGF.Builder.CreateBitCast(Src, HalfTy);

  // Perform the fp-extension.
  Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");

  if (Ops.size() >= 3)
    Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
  return Res;
}
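// Note: with three or more operands the builtin is a masked form, so Ops[2]
// carries the write mask and Ops[1] the pass-through source consumed by the
// select above.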
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {

  llvm::Type *Int32Ty = Builder.getInt32Ty();

  // Matching the struct layout from the compiler-rt/libgcc structure that is
  // filled in:
  // unsigned int __cpu_vendor;
  // unsigned int __cpu_type;
  // unsigned int __cpu_subtype;
  // unsigned int __cpu_features[1];
  llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                          llvm::ArrayType::get(Int32Ty, 1));

  // Grab the global __cpu_model.
  llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
  cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

  // Calculate the index needed to access the correct field based on the
  // range. Also adjust the expected value.
  unsigned Index;
  unsigned Value;
  std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING)                                               \
  .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS)                                        \
  .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE(ENUM, STR)                                                \
  .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS)                                     \
  .Case(ALIAS, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE(ENUM, STR)                                             \
  .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/TargetParser/X86TargetParser.def"
                               .Default({0, 0});
  assert(Value != 0 && "Invalid CPUStr passed to CpuIs");

  // Grab the appropriate field from __cpu_model.
  llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
                         ConstantInt::get(Int32Ty, Index)};
  llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
  CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
                                       CharUnits::fromQuantity(4));

  // Check the value of the field against the requested value.
  return Builder.CreateICmpEQ(CpuValue,
                              llvm::ConstantInt::get(Int32Ty, Value));
}
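// Sketch of how the table above is used: __builtin_cpu_is("intel") resolves
// through an X86_VENDOR .Case to {Index = 0, Value = the vendor enum}, so the
// __cpu_vendor field is loaded and compared; CPU type and subtype names
// select fields 1 and 2 of __cpu_model instead.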
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
  const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
  StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
  return EmitX86CpuSupports(FeatureStr);
}

Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
  return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
}

llvm::Value *
CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
  Value *Result = Builder.getTrue();
  if (FeatureMask[0] != 0) {
    // Matching the struct layout from the compiler-rt/libgcc structure that is
    // filled in:
    // unsigned int __cpu_vendor;
    // unsigned int __cpu_type;
    // unsigned int __cpu_subtype;
    // unsigned int __cpu_features[1];
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
                                            llvm::ArrayType::get(Int32Ty, 1));

    // Grab the global __cpu_model.
    llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
    cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);

    // Grab the first (0th) element from the field __cpu_features off of the
    // global in the struct STy.
    Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
                     Builder.getInt32(0)};
    Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
    Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
                                                CharUnits::fromQuantity(4));

    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(FeatureMask[0]);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  llvm::Type *ATy = llvm::ArrayType::get(Int32Ty, 3);
  llvm::Constant *CpuFeatures2 =
      CGM.CreateRuntimeVariable(ATy, "__cpu_features2");
  cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
  for (int i = 1; i != 4; ++i) {
    const uint32_t M = FeatureMask[i];
    if (!M)
      continue;
    Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)};
    Value *Features = Builder.CreateAlignedLoad(
        Int32Ty, Builder.CreateGEP(ATy, CpuFeatures2, Idxs),
        CharUnits::fromQuantity(4));
    // Check the value of the bit corresponding to the feature requested.
    Value *Mask = Builder.getInt32(M);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }

  return Result;
}
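// Note: FeatureMask[0] is tested against the single __cpu_features word in
// __cpu_model, while FeatureMask[1..3] are tested against the three extra
// words in __cpu_features2; a feature is reported as supported only if every
// requested bit is set.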
Value *CodeGenFunction::EmitAArch64CpuInit() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  llvm::FunctionCallee Func =
      CGM.CreateRuntimeFunction(FTy, "__init_cpu_features_resolver");
  cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
  cast<llvm::GlobalValue>(Func.getCallee())
      ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  return Builder.CreateCall(Func);
}

Value *CodeGenFunction::EmitX86CpuInit() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
                                                    /*Variadic*/ false);
  llvm::FunctionCallee Func =
      CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
  cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
  cast<llvm::GlobalValue>(Func.getCallee())
      ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  return Builder.CreateCall(Func);
}
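// Note: both initializer entry points above are expected to be provided by
// the builtins runtime (compiler-rt / libgcc); they populate the CPU model
// and feature globals that the cpu_is / cpu_supports lowerings read.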
llvm::Value *
CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
  uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
  Value *Result = Builder.getTrue();
  if (FeaturesMask != 0) {
    // Get features from structure in runtime library
    // struct {
    //   unsigned long long features;
    // } __aarch64_cpu_features;
    llvm::Type *STy = llvm::StructType::get(Int64Ty);
    llvm::Constant *AArch64CPUFeatures =
        CGM.CreateRuntimeVariable(STy, "__aarch64_cpu_features");
    cast<llvm::GlobalValue>(AArch64CPUFeatures)->setDSOLocal(true);
    llvm::Value *CpuFeatures = Builder.CreateGEP(
        STy, AArch64CPUFeatures,
        {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 0)});
    Value *Features = Builder.CreateAlignedLoad(Int64Ty, CpuFeatures,
                                                CharUnits::fromQuantity(8));
    Value *Mask = Builder.getInt64(FeaturesMask);
    Value *Bitset = Builder.CreateAnd(Features, Mask);
    Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
    Result = Builder.CreateAnd(Result, Cmp);
  }
  return Result;
}
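// Note: unlike the x86 path, AArch64 folds all requested features into a
// single 64-bit mask and checks them against the one 'features' word in
// __aarch64_cpu_features with a single and/compare.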
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == X86::BI__builtin_cpu_is)
    return EmitX86CpuIs(E);
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return EmitX86CpuSupports(E);
  if (BuiltinID == X86::BI__builtin_cpu_init)
    return EmitX86CpuInit();

  // Handle MSVC intrinsics before argument evaluation to prevent double
  // evaluation.
  if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
    return EmitMSVCBuiltinExpr(*MsvcIntId, E);

  SmallVector<Value *, 4> Ops;
  bool IsMaskFCmp = false;
  bool IsConjFMA = false;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    Ops.push_back(llvm::ConstantInt::get(
        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
  }

  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE we would need separate builtins to
  // do the different bounds checking. Rather than create a clang specific
  // SSE only builtin, this implements eight separate builtins to match gcc
  // implementation.
  auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
    Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops);
  };

  // For the vector forms of FP comparisons, translate the builtins directly to
  // IR.
  // TODO: The builtins could be removed if the SSE header files used vector
  // extension comparisons directly (vector ordered/unordered may need
  // additional support via __builtin_isnan()).
  auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
                                         bool IsSignaling) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    Value *Cmp;
    if (IsSignaling)
      Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
    else
      Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
    llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
    llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
    Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
    return Builder.CreateBitCast(Sext, FPVecTy);
  };
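  // Note: the sext-then-bitcast above reproduces the SSE/AVX vector-compare
  // semantics of producing an all-ones or all-zeros element, returned in the
  // original floating-point vector type.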
13854 switch (BuiltinID
) {
13855 default: return nullptr;
13856 case X86::BI_mm_prefetch
: {
13857 Value
*Address
= Ops
[0];
13858 ConstantInt
*C
= cast
<ConstantInt
>(Ops
[1]);
13859 Value
*RW
= ConstantInt::get(Int32Ty
, (C
->getZExtValue() >> 2) & 0x1);
13860 Value
*Locality
= ConstantInt::get(Int32Ty
, C
->getZExtValue() & 0x3);
13861 Value
*Data
= ConstantInt::get(Int32Ty
, 1);
13862 Function
*F
= CGM
.getIntrinsic(Intrinsic::prefetch
, Address
->getType());
13863 return Builder
.CreateCall(F
, {Address
, RW
, Locality
, Data
});
13865 case X86::BI_mm_clflush
: {
13866 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse2_clflush
),
13869 case X86::BI_mm_lfence
: {
13870 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse2_lfence
));
13872 case X86::BI_mm_mfence
: {
13873 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse2_mfence
));
13875 case X86::BI_mm_sfence
: {
13876 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse_sfence
));
13878 case X86::BI_mm_pause
: {
13879 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse2_pause
));
13881 case X86::BI__rdtsc
: {
13882 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_rdtsc
));
13884 case X86::BI__builtin_ia32_rdtscp
: {
13885 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_rdtscp
));
13886 Builder
.CreateDefaultAlignedStore(Builder
.CreateExtractValue(Call
, 1),
13888 return Builder
.CreateExtractValue(Call
, 0);
13890 case X86::BI__builtin_ia32_lzcnt_u16
:
13891 case X86::BI__builtin_ia32_lzcnt_u32
:
13892 case X86::BI__builtin_ia32_lzcnt_u64
: {
13893 Function
*F
= CGM
.getIntrinsic(Intrinsic::ctlz
, Ops
[0]->getType());
13894 return Builder
.CreateCall(F
, {Ops
[0], Builder
.getInt1(false)});
13896 case X86::BI__builtin_ia32_tzcnt_u16
:
13897 case X86::BI__builtin_ia32_tzcnt_u32
:
13898 case X86::BI__builtin_ia32_tzcnt_u64
: {
13899 Function
*F
= CGM
.getIntrinsic(Intrinsic::cttz
, Ops
[0]->getType());
13900 return Builder
.CreateCall(F
, {Ops
[0], Builder
.getInt1(false)});
13902 case X86::BI__builtin_ia32_undef128
:
13903 case X86::BI__builtin_ia32_undef256
:
13904 case X86::BI__builtin_ia32_undef512
:
13905 // The x86 definition of "undef" is not the same as the LLVM definition
13906 // (PR32176). We leave optimizing away an unnecessary zero constant to the
13907 // IR optimizer and backend.
13908 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
13909 // value, we should use that here instead of a zero.
13910 return llvm::Constant::getNullValue(ConvertType(E
->getType()));
13911 case X86::BI__builtin_ia32_vec_init_v8qi
:
13912 case X86::BI__builtin_ia32_vec_init_v4hi
:
13913 case X86::BI__builtin_ia32_vec_init_v2si
:
13914 return Builder
.CreateBitCast(BuildVector(Ops
),
13915 llvm::Type::getX86_MMXTy(getLLVMContext()));
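    // Note: the MMX vec_init builtins are assembled as a generic vector via
    // BuildVector and then bitcast to the x86_mmx type the callers expect.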
13916 case X86::BI__builtin_ia32_vec_ext_v2si
:
13917 case X86::BI__builtin_ia32_vec_ext_v16qi
:
13918 case X86::BI__builtin_ia32_vec_ext_v8hi
:
13919 case X86::BI__builtin_ia32_vec_ext_v4si
:
13920 case X86::BI__builtin_ia32_vec_ext_v4sf
:
13921 case X86::BI__builtin_ia32_vec_ext_v2di
:
13922 case X86::BI__builtin_ia32_vec_ext_v32qi
:
13923 case X86::BI__builtin_ia32_vec_ext_v16hi
:
13924 case X86::BI__builtin_ia32_vec_ext_v8si
:
13925 case X86::BI__builtin_ia32_vec_ext_v4di
: {
13927 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
13928 uint64_t Index
= cast
<ConstantInt
>(Ops
[1])->getZExtValue();
13929 Index
&= NumElts
- 1;
13930 // These builtins exist so we can ensure the index is an ICE and in range.
13931 // Otherwise we could just do this in the header file.
13932 return Builder
.CreateExtractElement(Ops
[0], Index
);
13934 case X86::BI__builtin_ia32_vec_set_v16qi
:
13935 case X86::BI__builtin_ia32_vec_set_v8hi
:
13936 case X86::BI__builtin_ia32_vec_set_v4si
:
13937 case X86::BI__builtin_ia32_vec_set_v2di
:
13938 case X86::BI__builtin_ia32_vec_set_v32qi
:
13939 case X86::BI__builtin_ia32_vec_set_v16hi
:
13940 case X86::BI__builtin_ia32_vec_set_v8si
:
13941 case X86::BI__builtin_ia32_vec_set_v4di
: {
13943 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
13944 unsigned Index
= cast
<ConstantInt
>(Ops
[2])->getZExtValue();
13945 Index
&= NumElts
- 1;
13946 // These builtins exist so we can ensure the index is an ICE and in range.
13947 // Otherwise we could just do this in the header file.
13948 return Builder
.CreateInsertElement(Ops
[0], Ops
[1], Index
);
13950 case X86::BI_mm_setcsr
:
13951 case X86::BI__builtin_ia32_ldmxcsr
: {
13952 Address Tmp
= CreateMemTemp(E
->getArg(0)->getType());
13953 Builder
.CreateStore(Ops
[0], Tmp
);
13954 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse_ldmxcsr
),
13957 case X86::BI_mm_getcsr
:
13958 case X86::BI__builtin_ia32_stmxcsr
: {
13959 Address Tmp
= CreateMemTemp(E
->getType());
13960 Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_sse_stmxcsr
),
13962 return Builder
.CreateLoad(Tmp
, "stmxcsr");
13964 case X86::BI__builtin_ia32_xsave
:
13965 case X86::BI__builtin_ia32_xsave64
:
13966 case X86::BI__builtin_ia32_xrstor
:
13967 case X86::BI__builtin_ia32_xrstor64
:
13968 case X86::BI__builtin_ia32_xsaveopt
:
13969 case X86::BI__builtin_ia32_xsaveopt64
:
13970 case X86::BI__builtin_ia32_xrstors
:
13971 case X86::BI__builtin_ia32_xrstors64
:
13972 case X86::BI__builtin_ia32_xsavec
:
13973 case X86::BI__builtin_ia32_xsavec64
:
13974 case X86::BI__builtin_ia32_xsaves
:
13975 case X86::BI__builtin_ia32_xsaves64
:
13976 case X86::BI__builtin_ia32_xsetbv
:
13977 case X86::BI_xsetbv
: {
13979 #define INTRINSIC_X86_XSAVE_ID(NAME) \
13980 case X86::BI__builtin_ia32_##NAME: \
13981 ID = Intrinsic::x86_##NAME; \
13983 switch (BuiltinID
) {
13984 default: llvm_unreachable("Unsupported intrinsic!");
13985 INTRINSIC_X86_XSAVE_ID(xsave
);
13986 INTRINSIC_X86_XSAVE_ID(xsave64
);
13987 INTRINSIC_X86_XSAVE_ID(xrstor
);
13988 INTRINSIC_X86_XSAVE_ID(xrstor64
);
13989 INTRINSIC_X86_XSAVE_ID(xsaveopt
);
13990 INTRINSIC_X86_XSAVE_ID(xsaveopt64
);
13991 INTRINSIC_X86_XSAVE_ID(xrstors
);
13992 INTRINSIC_X86_XSAVE_ID(xrstors64
);
13993 INTRINSIC_X86_XSAVE_ID(xsavec
);
13994 INTRINSIC_X86_XSAVE_ID(xsavec64
);
13995 INTRINSIC_X86_XSAVE_ID(xsaves
);
13996 INTRINSIC_X86_XSAVE_ID(xsaves64
);
13997 INTRINSIC_X86_XSAVE_ID(xsetbv
);
13998 case X86::BI_xsetbv
:
13999 ID
= Intrinsic::x86_xsetbv
;
14002 #undef INTRINSIC_X86_XSAVE_ID
14003 Value
*Mhi
= Builder
.CreateTrunc(
14004 Builder
.CreateLShr(Ops
[1], ConstantInt::get(Int64Ty
, 32)), Int32Ty
);
14005 Value
*Mlo
= Builder
.CreateTrunc(Ops
[1], Int32Ty
);
14007 Ops
.push_back(Mlo
);
14008 return Builder
.CreateCall(CGM
.getIntrinsic(ID
), Ops
);
14010 case X86::BI__builtin_ia32_xgetbv
:
14011 case X86::BI_xgetbv
:
14012 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::x86_xgetbv
), Ops
);
14013 case X86::BI__builtin_ia32_storedqudi128_mask
:
14014 case X86::BI__builtin_ia32_storedqusi128_mask
:
14015 case X86::BI__builtin_ia32_storedquhi128_mask
:
14016 case X86::BI__builtin_ia32_storedquqi128_mask
:
14017 case X86::BI__builtin_ia32_storeupd128_mask
:
14018 case X86::BI__builtin_ia32_storeups128_mask
:
14019 case X86::BI__builtin_ia32_storedqudi256_mask
:
14020 case X86::BI__builtin_ia32_storedqusi256_mask
:
14021 case X86::BI__builtin_ia32_storedquhi256_mask
:
14022 case X86::BI__builtin_ia32_storedquqi256_mask
:
14023 case X86::BI__builtin_ia32_storeupd256_mask
:
14024 case X86::BI__builtin_ia32_storeups256_mask
:
14025 case X86::BI__builtin_ia32_storedqudi512_mask
:
14026 case X86::BI__builtin_ia32_storedqusi512_mask
:
14027 case X86::BI__builtin_ia32_storedquhi512_mask
:
14028 case X86::BI__builtin_ia32_storedquqi512_mask
:
14029 case X86::BI__builtin_ia32_storeupd512_mask
:
14030 case X86::BI__builtin_ia32_storeups512_mask
:
14031 return EmitX86MaskedStore(*this, Ops
, Align(1));
14033 case X86::BI__builtin_ia32_storesh128_mask
:
14034 case X86::BI__builtin_ia32_storess128_mask
:
14035 case X86::BI__builtin_ia32_storesd128_mask
:
14036 return EmitX86MaskedStore(*this, Ops
, Align(1));
14038 case X86::BI__builtin_ia32_vpopcntb_128
:
14039 case X86::BI__builtin_ia32_vpopcntd_128
:
14040 case X86::BI__builtin_ia32_vpopcntq_128
:
14041 case X86::BI__builtin_ia32_vpopcntw_128
:
14042 case X86::BI__builtin_ia32_vpopcntb_256
:
14043 case X86::BI__builtin_ia32_vpopcntd_256
:
14044 case X86::BI__builtin_ia32_vpopcntq_256
:
14045 case X86::BI__builtin_ia32_vpopcntw_256
:
14046 case X86::BI__builtin_ia32_vpopcntb_512
:
14047 case X86::BI__builtin_ia32_vpopcntd_512
:
14048 case X86::BI__builtin_ia32_vpopcntq_512
:
14049 case X86::BI__builtin_ia32_vpopcntw_512
: {
14050 llvm::Type
*ResultType
= ConvertType(E
->getType());
14051 llvm::Function
*F
= CGM
.getIntrinsic(Intrinsic::ctpop
, ResultType
);
14052 return Builder
.CreateCall(F
, Ops
);
14054 case X86::BI__builtin_ia32_cvtmask2b128
:
14055 case X86::BI__builtin_ia32_cvtmask2b256
:
14056 case X86::BI__builtin_ia32_cvtmask2b512
:
14057 case X86::BI__builtin_ia32_cvtmask2w128
:
14058 case X86::BI__builtin_ia32_cvtmask2w256
:
14059 case X86::BI__builtin_ia32_cvtmask2w512
:
14060 case X86::BI__builtin_ia32_cvtmask2d128
:
14061 case X86::BI__builtin_ia32_cvtmask2d256
:
14062 case X86::BI__builtin_ia32_cvtmask2d512
:
14063 case X86::BI__builtin_ia32_cvtmask2q128
:
14064 case X86::BI__builtin_ia32_cvtmask2q256
:
14065 case X86::BI__builtin_ia32_cvtmask2q512
:
14066 return EmitX86SExtMask(*this, Ops
[0], ConvertType(E
->getType()));
14068 case X86::BI__builtin_ia32_cvtb2mask128
:
14069 case X86::BI__builtin_ia32_cvtb2mask256
:
14070 case X86::BI__builtin_ia32_cvtb2mask512
:
14071 case X86::BI__builtin_ia32_cvtw2mask128
:
14072 case X86::BI__builtin_ia32_cvtw2mask256
:
14073 case X86::BI__builtin_ia32_cvtw2mask512
:
14074 case X86::BI__builtin_ia32_cvtd2mask128
:
14075 case X86::BI__builtin_ia32_cvtd2mask256
:
14076 case X86::BI__builtin_ia32_cvtd2mask512
:
14077 case X86::BI__builtin_ia32_cvtq2mask128
:
14078 case X86::BI__builtin_ia32_cvtq2mask256
:
14079 case X86::BI__builtin_ia32_cvtq2mask512
:
14080 return EmitX86ConvertToMask(*this, Ops
[0]);
14082 case X86::BI__builtin_ia32_cvtdq2ps512_mask
:
14083 case X86::BI__builtin_ia32_cvtqq2ps512_mask
:
14084 case X86::BI__builtin_ia32_cvtqq2pd512_mask
:
14085 case X86::BI__builtin_ia32_vcvtw2ph512_mask
:
14086 case X86::BI__builtin_ia32_vcvtdq2ph512_mask
:
14087 case X86::BI__builtin_ia32_vcvtqq2ph512_mask
:
14088 return EmitX86ConvertIntToFp(*this, E
, Ops
, /*IsSigned*/ true);
14089 case X86::BI__builtin_ia32_cvtudq2ps512_mask
:
14090 case X86::BI__builtin_ia32_cvtuqq2ps512_mask
:
14091 case X86::BI__builtin_ia32_cvtuqq2pd512_mask
:
14092 case X86::BI__builtin_ia32_vcvtuw2ph512_mask
:
14093 case X86::BI__builtin_ia32_vcvtudq2ph512_mask
:
14094 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask
:
14095 return EmitX86ConvertIntToFp(*this, E
, Ops
, /*IsSigned*/ false);
14097 case X86::BI__builtin_ia32_vfmaddss3
:
14098 case X86::BI__builtin_ia32_vfmaddsd3
:
14099 case X86::BI__builtin_ia32_vfmaddsh3_mask
:
14100 case X86::BI__builtin_ia32_vfmaddss3_mask
:
14101 case X86::BI__builtin_ia32_vfmaddsd3_mask
:
14102 return EmitScalarFMAExpr(*this, E
, Ops
, Ops
[0]);
14103 case X86::BI__builtin_ia32_vfmaddss
:
14104 case X86::BI__builtin_ia32_vfmaddsd
:
14105 return EmitScalarFMAExpr(*this, E
, Ops
,
14106 Constant::getNullValue(Ops
[0]->getType()));
14107 case X86::BI__builtin_ia32_vfmaddsh3_maskz
:
14108 case X86::BI__builtin_ia32_vfmaddss3_maskz
:
14109 case X86::BI__builtin_ia32_vfmaddsd3_maskz
:
14110 return EmitScalarFMAExpr(*this, E
, Ops
, Ops
[0], /*ZeroMask*/ true);
14111 case X86::BI__builtin_ia32_vfmaddsh3_mask3
:
14112 case X86::BI__builtin_ia32_vfmaddss3_mask3
:
14113 case X86::BI__builtin_ia32_vfmaddsd3_mask3
:
14114 return EmitScalarFMAExpr(*this, E
, Ops
, Ops
[2], /*ZeroMask*/ false, 2);
14115 case X86::BI__builtin_ia32_vfmsubsh3_mask3
:
14116 case X86::BI__builtin_ia32_vfmsubss3_mask3
:
14117 case X86::BI__builtin_ia32_vfmsubsd3_mask3
:
14118 return EmitScalarFMAExpr(*this, E
, Ops
, Ops
[2], /*ZeroMask*/ false, 2,
14120 case X86::BI__builtin_ia32_vfmaddph
:
14121 case X86::BI__builtin_ia32_vfmaddps
:
14122 case X86::BI__builtin_ia32_vfmaddpd
:
14123 case X86::BI__builtin_ia32_vfmaddph256
:
14124 case X86::BI__builtin_ia32_vfmaddps256
:
14125 case X86::BI__builtin_ia32_vfmaddpd256
:
14126 case X86::BI__builtin_ia32_vfmaddph512_mask
:
14127 case X86::BI__builtin_ia32_vfmaddph512_maskz
:
14128 case X86::BI__builtin_ia32_vfmaddph512_mask3
:
14129 case X86::BI__builtin_ia32_vfmaddps512_mask
:
14130 case X86::BI__builtin_ia32_vfmaddps512_maskz
:
14131 case X86::BI__builtin_ia32_vfmaddps512_mask3
:
14132 case X86::BI__builtin_ia32_vfmsubps512_mask3
:
14133 case X86::BI__builtin_ia32_vfmaddpd512_mask
:
14134 case X86::BI__builtin_ia32_vfmaddpd512_maskz
:
14135 case X86::BI__builtin_ia32_vfmaddpd512_mask3
:
14136 case X86::BI__builtin_ia32_vfmsubpd512_mask3
:
14137 case X86::BI__builtin_ia32_vfmsubph512_mask3
:
14138 return EmitX86FMAExpr(*this, E
, Ops
, BuiltinID
, /*IsAddSub*/ false);
14139 case X86::BI__builtin_ia32_vfmaddsubph512_mask
:
14140 case X86::BI__builtin_ia32_vfmaddsubph512_maskz
:
14141 case X86::BI__builtin_ia32_vfmaddsubph512_mask3
:
14142 case X86::BI__builtin_ia32_vfmsubaddph512_mask3
:
14143 case X86::BI__builtin_ia32_vfmaddsubps512_mask
:
14144 case X86::BI__builtin_ia32_vfmaddsubps512_maskz
:
14145 case X86::BI__builtin_ia32_vfmaddsubps512_mask3
:
14146 case X86::BI__builtin_ia32_vfmsubaddps512_mask3
:
14147 case X86::BI__builtin_ia32_vfmaddsubpd512_mask
:
14148 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz
:
14149 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3
:
14150 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3
:
14151 return EmitX86FMAExpr(*this, E
, Ops
, BuiltinID
, /*IsAddSub*/ true);
14153 case X86::BI__builtin_ia32_movdqa32store128_mask
:
14154 case X86::BI__builtin_ia32_movdqa64store128_mask
:
14155 case X86::BI__builtin_ia32_storeaps128_mask
:
14156 case X86::BI__builtin_ia32_storeapd128_mask
:
14157 case X86::BI__builtin_ia32_movdqa32store256_mask
:
14158 case X86::BI__builtin_ia32_movdqa64store256_mask
:
14159 case X86::BI__builtin_ia32_storeaps256_mask
:
14160 case X86::BI__builtin_ia32_storeapd256_mask
:
14161 case X86::BI__builtin_ia32_movdqa32store512_mask
:
14162 case X86::BI__builtin_ia32_movdqa64store512_mask
:
14163 case X86::BI__builtin_ia32_storeaps512_mask
:
14164 case X86::BI__builtin_ia32_storeapd512_mask
:
14165 return EmitX86MaskedStore(
14167 getContext().getTypeAlignInChars(E
->getArg(1)->getType()).getAsAlign());
14169 case X86::BI__builtin_ia32_loadups128_mask
:
14170 case X86::BI__builtin_ia32_loadups256_mask
:
14171 case X86::BI__builtin_ia32_loadups512_mask
:
14172 case X86::BI__builtin_ia32_loadupd128_mask
:
14173 case X86::BI__builtin_ia32_loadupd256_mask
:
14174 case X86::BI__builtin_ia32_loadupd512_mask
:
14175 case X86::BI__builtin_ia32_loaddquqi128_mask
:
14176 case X86::BI__builtin_ia32_loaddquqi256_mask
:
14177 case X86::BI__builtin_ia32_loaddquqi512_mask
:
14178 case X86::BI__builtin_ia32_loaddquhi128_mask
:
14179 case X86::BI__builtin_ia32_loaddquhi256_mask
:
14180 case X86::BI__builtin_ia32_loaddquhi512_mask
:
14181 case X86::BI__builtin_ia32_loaddqusi128_mask
:
14182 case X86::BI__builtin_ia32_loaddqusi256_mask
:
14183 case X86::BI__builtin_ia32_loaddqusi512_mask
:
14184 case X86::BI__builtin_ia32_loaddqudi128_mask
:
14185 case X86::BI__builtin_ia32_loaddqudi256_mask
:
14186 case X86::BI__builtin_ia32_loaddqudi512_mask
:
14187 return EmitX86MaskedLoad(*this, Ops
, Align(1));
14189 case X86::BI__builtin_ia32_loadsh128_mask
:
14190 case X86::BI__builtin_ia32_loadss128_mask
:
14191 case X86::BI__builtin_ia32_loadsd128_mask
:
14192 return EmitX86MaskedLoad(*this, Ops
, Align(1));
14194 case X86::BI__builtin_ia32_loadaps128_mask
:
14195 case X86::BI__builtin_ia32_loadaps256_mask
:
14196 case X86::BI__builtin_ia32_loadaps512_mask
:
14197 case X86::BI__builtin_ia32_loadapd128_mask
:
14198 case X86::BI__builtin_ia32_loadapd256_mask
:
14199 case X86::BI__builtin_ia32_loadapd512_mask
:
14200 case X86::BI__builtin_ia32_movdqa32load128_mask
:
14201 case X86::BI__builtin_ia32_movdqa32load256_mask
:
14202 case X86::BI__builtin_ia32_movdqa32load512_mask
:
14203 case X86::BI__builtin_ia32_movdqa64load128_mask
:
14204 case X86::BI__builtin_ia32_movdqa64load256_mask
:
14205 case X86::BI__builtin_ia32_movdqa64load512_mask
:
14206 return EmitX86MaskedLoad(
14208 getContext().getTypeAlignInChars(E
->getArg(1)->getType()).getAsAlign());
14210 case X86::BI__builtin_ia32_expandloaddf128_mask
:
14211 case X86::BI__builtin_ia32_expandloaddf256_mask
:
14212 case X86::BI__builtin_ia32_expandloaddf512_mask
:
14213 case X86::BI__builtin_ia32_expandloadsf128_mask
:
14214 case X86::BI__builtin_ia32_expandloadsf256_mask
:
14215 case X86::BI__builtin_ia32_expandloadsf512_mask
:
14216 case X86::BI__builtin_ia32_expandloaddi128_mask
:
14217 case X86::BI__builtin_ia32_expandloaddi256_mask
:
14218 case X86::BI__builtin_ia32_expandloaddi512_mask
:
14219 case X86::BI__builtin_ia32_expandloadsi128_mask
:
14220 case X86::BI__builtin_ia32_expandloadsi256_mask
:
14221 case X86::BI__builtin_ia32_expandloadsi512_mask
:
14222 case X86::BI__builtin_ia32_expandloadhi128_mask
:
14223 case X86::BI__builtin_ia32_expandloadhi256_mask
:
14224 case X86::BI__builtin_ia32_expandloadhi512_mask
:
14225 case X86::BI__builtin_ia32_expandloadqi128_mask
:
14226 case X86::BI__builtin_ia32_expandloadqi256_mask
:
14227 case X86::BI__builtin_ia32_expandloadqi512_mask
:
14228 return EmitX86ExpandLoad(*this, Ops
);
14230 case X86::BI__builtin_ia32_compressstoredf128_mask
:
14231 case X86::BI__builtin_ia32_compressstoredf256_mask
:
14232 case X86::BI__builtin_ia32_compressstoredf512_mask
:
14233 case X86::BI__builtin_ia32_compressstoresf128_mask
:
14234 case X86::BI__builtin_ia32_compressstoresf256_mask
:
14235 case X86::BI__builtin_ia32_compressstoresf512_mask
:
14236 case X86::BI__builtin_ia32_compressstoredi128_mask
:
14237 case X86::BI__builtin_ia32_compressstoredi256_mask
:
14238 case X86::BI__builtin_ia32_compressstoredi512_mask
:
14239 case X86::BI__builtin_ia32_compressstoresi128_mask
:
14240 case X86::BI__builtin_ia32_compressstoresi256_mask
:
14241 case X86::BI__builtin_ia32_compressstoresi512_mask
:
14242 case X86::BI__builtin_ia32_compressstorehi128_mask
:
14243 case X86::BI__builtin_ia32_compressstorehi256_mask
:
14244 case X86::BI__builtin_ia32_compressstorehi512_mask
:
14245 case X86::BI__builtin_ia32_compressstoreqi128_mask
:
14246 case X86::BI__builtin_ia32_compressstoreqi256_mask
:
14247 case X86::BI__builtin_ia32_compressstoreqi512_mask
:
14248 return EmitX86CompressStore(*this, Ops
);
14250 case X86::BI__builtin_ia32_expanddf128_mask
:
14251 case X86::BI__builtin_ia32_expanddf256_mask
:
14252 case X86::BI__builtin_ia32_expanddf512_mask
:
14253 case X86::BI__builtin_ia32_expandsf128_mask
:
14254 case X86::BI__builtin_ia32_expandsf256_mask
:
14255 case X86::BI__builtin_ia32_expandsf512_mask
:
14256 case X86::BI__builtin_ia32_expanddi128_mask
:
14257 case X86::BI__builtin_ia32_expanddi256_mask
:
14258 case X86::BI__builtin_ia32_expanddi512_mask
:
14259 case X86::BI__builtin_ia32_expandsi128_mask
:
14260 case X86::BI__builtin_ia32_expandsi256_mask
:
14261 case X86::BI__builtin_ia32_expandsi512_mask
:
14262 case X86::BI__builtin_ia32_expandhi128_mask
:
14263 case X86::BI__builtin_ia32_expandhi256_mask
:
14264 case X86::BI__builtin_ia32_expandhi512_mask
:
14265 case X86::BI__builtin_ia32_expandqi128_mask
:
14266 case X86::BI__builtin_ia32_expandqi256_mask
:
14267 case X86::BI__builtin_ia32_expandqi512_mask
:
14268 return EmitX86CompressExpand(*this, Ops
, /*IsCompress*/false);
14270 case X86::BI__builtin_ia32_compressdf128_mask
:
14271 case X86::BI__builtin_ia32_compressdf256_mask
:
14272 case X86::BI__builtin_ia32_compressdf512_mask
:
14273 case X86::BI__builtin_ia32_compresssf128_mask
:
14274 case X86::BI__builtin_ia32_compresssf256_mask
:
14275 case X86::BI__builtin_ia32_compresssf512_mask
:
14276 case X86::BI__builtin_ia32_compressdi128_mask
:
14277 case X86::BI__builtin_ia32_compressdi256_mask
:
14278 case X86::BI__builtin_ia32_compressdi512_mask
:
14279 case X86::BI__builtin_ia32_compresssi128_mask
:
14280 case X86::BI__builtin_ia32_compresssi256_mask
:
14281 case X86::BI__builtin_ia32_compresssi512_mask
:
14282 case X86::BI__builtin_ia32_compresshi128_mask
:
14283 case X86::BI__builtin_ia32_compresshi256_mask
:
14284 case X86::BI__builtin_ia32_compresshi512_mask
:
14285 case X86::BI__builtin_ia32_compressqi128_mask
:
14286 case X86::BI__builtin_ia32_compressqi256_mask
:
14287 case X86::BI__builtin_ia32_compressqi512_mask
:
14288 return EmitX86CompressExpand(*this, Ops
, /*IsCompress*/true);
14290 case X86::BI__builtin_ia32_gather3div2df
:
14291 case X86::BI__builtin_ia32_gather3div2di
:
14292 case X86::BI__builtin_ia32_gather3div4df
:
14293 case X86::BI__builtin_ia32_gather3div4di
:
14294 case X86::BI__builtin_ia32_gather3div4sf
:
14295 case X86::BI__builtin_ia32_gather3div4si
:
14296 case X86::BI__builtin_ia32_gather3div8sf
:
14297 case X86::BI__builtin_ia32_gather3div8si
:
14298 case X86::BI__builtin_ia32_gather3siv2df
:
14299 case X86::BI__builtin_ia32_gather3siv2di
:
14300 case X86::BI__builtin_ia32_gather3siv4df
:
14301 case X86::BI__builtin_ia32_gather3siv4di
:
14302 case X86::BI__builtin_ia32_gather3siv4sf
:
14303 case X86::BI__builtin_ia32_gather3siv4si
:
14304 case X86::BI__builtin_ia32_gather3siv8sf
:
14305 case X86::BI__builtin_ia32_gather3siv8si
:
14306 case X86::BI__builtin_ia32_gathersiv8df
:
14307 case X86::BI__builtin_ia32_gathersiv16sf
:
14308 case X86::BI__builtin_ia32_gatherdiv8df
:
14309 case X86::BI__builtin_ia32_gatherdiv16sf
:
14310 case X86::BI__builtin_ia32_gathersiv8di
:
14311 case X86::BI__builtin_ia32_gathersiv16si
:
14312 case X86::BI__builtin_ia32_gatherdiv8di
:
14313 case X86::BI__builtin_ia32_gatherdiv16si
: {
14315 switch (BuiltinID
) {
14316 default: llvm_unreachable("Unexpected builtin");
14317 case X86::BI__builtin_ia32_gather3div2df
:
14318 IID
= Intrinsic::x86_avx512_mask_gather3div2_df
;
14320 case X86::BI__builtin_ia32_gather3div2di
:
14321 IID
= Intrinsic::x86_avx512_mask_gather3div2_di
;
14323 case X86::BI__builtin_ia32_gather3div4df
:
14324 IID
= Intrinsic::x86_avx512_mask_gather3div4_df
;
14326 case X86::BI__builtin_ia32_gather3div4di
:
14327 IID
= Intrinsic::x86_avx512_mask_gather3div4_di
;
14329 case X86::BI__builtin_ia32_gather3div4sf
:
14330 IID
= Intrinsic::x86_avx512_mask_gather3div4_sf
;
14332 case X86::BI__builtin_ia32_gather3div4si
:
14333 IID
= Intrinsic::x86_avx512_mask_gather3div4_si
;
14335 case X86::BI__builtin_ia32_gather3div8sf
:
14336 IID
= Intrinsic::x86_avx512_mask_gather3div8_sf
;
14338 case X86::BI__builtin_ia32_gather3div8si
:
14339 IID
= Intrinsic::x86_avx512_mask_gather3div8_si
;
14341 case X86::BI__builtin_ia32_gather3siv2df
:
14342 IID
= Intrinsic::x86_avx512_mask_gather3siv2_df
;
14344 case X86::BI__builtin_ia32_gather3siv2di
:
14345 IID
= Intrinsic::x86_avx512_mask_gather3siv2_di
;
14347 case X86::BI__builtin_ia32_gather3siv4df
:
14348 IID
= Intrinsic::x86_avx512_mask_gather3siv4_df
;
14350 case X86::BI__builtin_ia32_gather3siv4di
:
14351 IID
= Intrinsic::x86_avx512_mask_gather3siv4_di
;
14353 case X86::BI__builtin_ia32_gather3siv4sf
:
14354 IID
= Intrinsic::x86_avx512_mask_gather3siv4_sf
;
14356 case X86::BI__builtin_ia32_gather3siv4si
:
14357 IID
= Intrinsic::x86_avx512_mask_gather3siv4_si
;
14359 case X86::BI__builtin_ia32_gather3siv8sf
:
14360 IID
= Intrinsic::x86_avx512_mask_gather3siv8_sf
;
14362 case X86::BI__builtin_ia32_gather3siv8si
:
14363 IID
= Intrinsic::x86_avx512_mask_gather3siv8_si
;
14365 case X86::BI__builtin_ia32_gathersiv8df
:
14366 IID
= Intrinsic::x86_avx512_mask_gather_dpd_512
;
14368 case X86::BI__builtin_ia32_gathersiv16sf
:
14369 IID
= Intrinsic::x86_avx512_mask_gather_dps_512
;
14371 case X86::BI__builtin_ia32_gatherdiv8df
:
14372 IID
= Intrinsic::x86_avx512_mask_gather_qpd_512
;
14374 case X86::BI__builtin_ia32_gatherdiv16sf
:
14375 IID
= Intrinsic::x86_avx512_mask_gather_qps_512
;
14377 case X86::BI__builtin_ia32_gathersiv8di
:
14378 IID
= Intrinsic::x86_avx512_mask_gather_dpq_512
;
14380 case X86::BI__builtin_ia32_gathersiv16si
:
14381 IID
= Intrinsic::x86_avx512_mask_gather_dpi_512
;
14383 case X86::BI__builtin_ia32_gatherdiv8di
:
14384 IID
= Intrinsic::x86_avx512_mask_gather_qpq_512
;
14386 case X86::BI__builtin_ia32_gatherdiv16si
:
14387 IID
= Intrinsic::x86_avx512_mask_gather_qpi_512
;
14391 unsigned MinElts
= std::min(
14392 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements(),
14393 cast
<llvm::FixedVectorType
>(Ops
[2]->getType())->getNumElements());
14394 Ops
[3] = getMaskVecValue(*this, Ops
[3], MinElts
);
14395 Function
*Intr
= CGM
.getIntrinsic(IID
);
14396 return Builder
.CreateCall(Intr
, Ops
);
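  // The scatter builtins handled below mirror the gather lowering above: the
  // mask operand (Ops[1]) is resized to the smaller of the source and index
  // vector element counts before calling the target intrinsic.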
14399 case X86::BI__builtin_ia32_scattersiv8df
:
14400 case X86::BI__builtin_ia32_scattersiv16sf
:
14401 case X86::BI__builtin_ia32_scatterdiv8df
:
14402 case X86::BI__builtin_ia32_scatterdiv16sf
:
14403 case X86::BI__builtin_ia32_scattersiv8di
:
14404 case X86::BI__builtin_ia32_scattersiv16si
:
14405 case X86::BI__builtin_ia32_scatterdiv8di
:
14406 case X86::BI__builtin_ia32_scatterdiv16si
:
14407 case X86::BI__builtin_ia32_scatterdiv2df
:
14408 case X86::BI__builtin_ia32_scatterdiv2di
:
14409 case X86::BI__builtin_ia32_scatterdiv4df
:
14410 case X86::BI__builtin_ia32_scatterdiv4di
:
14411 case X86::BI__builtin_ia32_scatterdiv4sf
:
14412 case X86::BI__builtin_ia32_scatterdiv4si
:
14413 case X86::BI__builtin_ia32_scatterdiv8sf
:
14414 case X86::BI__builtin_ia32_scatterdiv8si
:
14415 case X86::BI__builtin_ia32_scattersiv2df
:
14416 case X86::BI__builtin_ia32_scattersiv2di
:
14417 case X86::BI__builtin_ia32_scattersiv4df
:
14418 case X86::BI__builtin_ia32_scattersiv4di
:
14419 case X86::BI__builtin_ia32_scattersiv4sf
:
14420 case X86::BI__builtin_ia32_scattersiv4si
:
14421 case X86::BI__builtin_ia32_scattersiv8sf
:
14422 case X86::BI__builtin_ia32_scattersiv8si
: {
14424 switch (BuiltinID
) {
14425 default: llvm_unreachable("Unexpected builtin");
14426 case X86::BI__builtin_ia32_scattersiv8df
:
14427 IID
= Intrinsic::x86_avx512_mask_scatter_dpd_512
;
14429 case X86::BI__builtin_ia32_scattersiv16sf
:
14430 IID
= Intrinsic::x86_avx512_mask_scatter_dps_512
;
14432 case X86::BI__builtin_ia32_scatterdiv8df
:
14433 IID
= Intrinsic::x86_avx512_mask_scatter_qpd_512
;
14435 case X86::BI__builtin_ia32_scatterdiv16sf
:
14436 IID
= Intrinsic::x86_avx512_mask_scatter_qps_512
;
14438 case X86::BI__builtin_ia32_scattersiv8di
:
14439 IID
= Intrinsic::x86_avx512_mask_scatter_dpq_512
;
14441 case X86::BI__builtin_ia32_scattersiv16si
:
14442 IID
= Intrinsic::x86_avx512_mask_scatter_dpi_512
;
14444 case X86::BI__builtin_ia32_scatterdiv8di
:
14445 IID
= Intrinsic::x86_avx512_mask_scatter_qpq_512
;
14447 case X86::BI__builtin_ia32_scatterdiv16si
:
14448 IID
= Intrinsic::x86_avx512_mask_scatter_qpi_512
;
14450 case X86::BI__builtin_ia32_scatterdiv2df
:
14451 IID
= Intrinsic::x86_avx512_mask_scatterdiv2_df
;
14453 case X86::BI__builtin_ia32_scatterdiv2di
:
14454 IID
= Intrinsic::x86_avx512_mask_scatterdiv2_di
;
14456 case X86::BI__builtin_ia32_scatterdiv4df
:
14457 IID
= Intrinsic::x86_avx512_mask_scatterdiv4_df
;
14459 case X86::BI__builtin_ia32_scatterdiv4di
:
14460 IID
= Intrinsic::x86_avx512_mask_scatterdiv4_di
;
14462 case X86::BI__builtin_ia32_scatterdiv4sf
:
14463 IID
= Intrinsic::x86_avx512_mask_scatterdiv4_sf
;
14465 case X86::BI__builtin_ia32_scatterdiv4si
:
14466 IID
= Intrinsic::x86_avx512_mask_scatterdiv4_si
;
14468 case X86::BI__builtin_ia32_scatterdiv8sf
:
14469 IID
= Intrinsic::x86_avx512_mask_scatterdiv8_sf
;
14471 case X86::BI__builtin_ia32_scatterdiv8si
:
14472 IID
= Intrinsic::x86_avx512_mask_scatterdiv8_si
;
14474 case X86::BI__builtin_ia32_scattersiv2df
:
14475 IID
= Intrinsic::x86_avx512_mask_scattersiv2_df
;
14477 case X86::BI__builtin_ia32_scattersiv2di
:
14478 IID
= Intrinsic::x86_avx512_mask_scattersiv2_di
;
14480 case X86::BI__builtin_ia32_scattersiv4df
:
14481 IID
= Intrinsic::x86_avx512_mask_scattersiv4_df
;
14483 case X86::BI__builtin_ia32_scattersiv4di
:
14484 IID
= Intrinsic::x86_avx512_mask_scattersiv4_di
;
14486 case X86::BI__builtin_ia32_scattersiv4sf
:
14487 IID
= Intrinsic::x86_avx512_mask_scattersiv4_sf
;
14489 case X86::BI__builtin_ia32_scattersiv4si
:
14490 IID
= Intrinsic::x86_avx512_mask_scattersiv4_si
;
14492 case X86::BI__builtin_ia32_scattersiv8sf
:
14493 IID
= Intrinsic::x86_avx512_mask_scattersiv8_sf
;
14495 case X86::BI__builtin_ia32_scattersiv8si
:
14496 IID
= Intrinsic::x86_avx512_mask_scattersiv8_si
;
14500 unsigned MinElts
= std::min(
14501 cast
<llvm::FixedVectorType
>(Ops
[2]->getType())->getNumElements(),
14502 cast
<llvm::FixedVectorType
>(Ops
[3]->getType())->getNumElements());
14503 Ops
[1] = getMaskVecValue(*this, Ops
[1], MinElts
);
14504 Function
*Intr
= CGM
.getIntrinsic(IID
);
14505 return Builder
.CreateCall(Intr
, Ops
);
14508 case X86::BI__builtin_ia32_vextractf128_pd256
:
14509 case X86::BI__builtin_ia32_vextractf128_ps256
:
14510 case X86::BI__builtin_ia32_vextractf128_si256
:
14511 case X86::BI__builtin_ia32_extract128i256
:
14512 case X86::BI__builtin_ia32_extractf64x4_mask
:
14513 case X86::BI__builtin_ia32_extractf32x4_mask
:
14514 case X86::BI__builtin_ia32_extracti64x4_mask
:
14515 case X86::BI__builtin_ia32_extracti32x4_mask
:
14516 case X86::BI__builtin_ia32_extractf32x8_mask
:
14517 case X86::BI__builtin_ia32_extracti32x8_mask
:
14518 case X86::BI__builtin_ia32_extractf32x4_256_mask
:
14519 case X86::BI__builtin_ia32_extracti32x4_256_mask
:
14520 case X86::BI__builtin_ia32_extractf64x2_256_mask
:
14521 case X86::BI__builtin_ia32_extracti64x2_256_mask
:
14522 case X86::BI__builtin_ia32_extractf64x2_512_mask
:
14523 case X86::BI__builtin_ia32_extracti64x2_512_mask
: {
14524 auto *DstTy
= cast
<llvm::FixedVectorType
>(ConvertType(E
->getType()));
14525 unsigned NumElts
= DstTy
->getNumElements();
14526 unsigned SrcNumElts
=
14527 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14528 unsigned SubVectors
= SrcNumElts
/ NumElts
;
14529 unsigned Index
= cast
<ConstantInt
>(Ops
[1])->getZExtValue();
14530 assert(llvm::isPowerOf2_32(SubVectors
) && "Expected power of 2 subvectors");
14531 Index
&= SubVectors
- 1; // Remove any extra bits.
14535 for (unsigned i
= 0; i
!= NumElts
; ++i
)
14536 Indices
[i
] = i
+ Index
;
14538 Value
*Res
= Builder
.CreateShuffleVector(Ops
[0], ArrayRef(Indices
, NumElts
),
14541 if (Ops
.size() == 4)
14542 Res
= EmitX86Select(*this, Ops
[3], Res
, Ops
[2]);
14546 case X86::BI__builtin_ia32_vinsertf128_pd256
:
14547 case X86::BI__builtin_ia32_vinsertf128_ps256
:
14548 case X86::BI__builtin_ia32_vinsertf128_si256
:
14549 case X86::BI__builtin_ia32_insert128i256
:
14550 case X86::BI__builtin_ia32_insertf64x4
:
14551 case X86::BI__builtin_ia32_insertf32x4
:
14552 case X86::BI__builtin_ia32_inserti64x4
:
14553 case X86::BI__builtin_ia32_inserti32x4
:
14554 case X86::BI__builtin_ia32_insertf32x8
:
14555 case X86::BI__builtin_ia32_inserti32x8
:
14556 case X86::BI__builtin_ia32_insertf32x4_256
:
14557 case X86::BI__builtin_ia32_inserti32x4_256
:
14558 case X86::BI__builtin_ia32_insertf64x2_256
:
14559 case X86::BI__builtin_ia32_inserti64x2_256
:
14560 case X86::BI__builtin_ia32_insertf64x2_512
:
14561 case X86::BI__builtin_ia32_inserti64x2_512
: {
14562 unsigned DstNumElts
=
14563 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14564 unsigned SrcNumElts
=
14565 cast
<llvm::FixedVectorType
>(Ops
[1]->getType())->getNumElements();
14566 unsigned SubVectors
= DstNumElts
/ SrcNumElts
;
14567 unsigned Index
= cast
<ConstantInt
>(Ops
[2])->getZExtValue();
14568 assert(llvm::isPowerOf2_32(SubVectors
) && "Expected power of 2 subvectors");
14569 Index
&= SubVectors
- 1; // Remove any extra bits.
14570 Index
*= SrcNumElts
;
14573 for (unsigned i
= 0; i
!= DstNumElts
; ++i
)
14574 Indices
[i
] = (i
>= SrcNumElts
) ? SrcNumElts
+ (i
% SrcNumElts
) : i
;
14576 Value
*Op1
= Builder
.CreateShuffleVector(
14577 Ops
[1], ArrayRef(Indices
, DstNumElts
), "widen");
14579 for (unsigned i
= 0; i
!= DstNumElts
; ++i
) {
14580 if (i
>= Index
&& i
< (Index
+ SrcNumElts
))
14581 Indices
[i
] = (i
- Index
) + DstNumElts
;
14586 return Builder
.CreateShuffleVector(Ops
[0], Op1
,
14587 ArrayRef(Indices
, DstNumElts
), "insert");
14589 case X86::BI__builtin_ia32_pmovqd512_mask
:
14590 case X86::BI__builtin_ia32_pmovwb512_mask
: {
14591 Value
*Res
= Builder
.CreateTrunc(Ops
[0], Ops
[1]->getType());
14592 return EmitX86Select(*this, Ops
[2], Res
, Ops
[1]);
14594 case X86::BI__builtin_ia32_pmovdb512_mask
:
14595 case X86::BI__builtin_ia32_pmovdw512_mask
:
14596 case X86::BI__builtin_ia32_pmovqw512_mask
: {
14597 if (const auto *C
= dyn_cast
<Constant
>(Ops
[2]))
14598 if (C
->isAllOnesValue())
14599 return Builder
.CreateTrunc(Ops
[0], Ops
[1]->getType());
14602 switch (BuiltinID
) {
14603 default: llvm_unreachable("Unsupported intrinsic!");
14604 case X86::BI__builtin_ia32_pmovdb512_mask
:
14605 IID
= Intrinsic::x86_avx512_mask_pmov_db_512
;
14607 case X86::BI__builtin_ia32_pmovdw512_mask
:
14608 IID
= Intrinsic::x86_avx512_mask_pmov_dw_512
;
14610 case X86::BI__builtin_ia32_pmovqw512_mask
:
14611 IID
= Intrinsic::x86_avx512_mask_pmov_qw_512
;
14615 Function
*Intr
= CGM
.getIntrinsic(IID
);
14616 return Builder
.CreateCall(Intr
, Ops
);
14618 case X86::BI__builtin_ia32_pblendw128
:
14619 case X86::BI__builtin_ia32_blendpd
:
14620 case X86::BI__builtin_ia32_blendps
:
14621 case X86::BI__builtin_ia32_blendpd256
:
14622 case X86::BI__builtin_ia32_blendps256
:
14623 case X86::BI__builtin_ia32_pblendw256
:
14624 case X86::BI__builtin_ia32_pblendd128
:
14625 case X86::BI__builtin_ia32_pblendd256
: {
14627 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14628 unsigned Imm
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue();
14631 // If there are more than 8 elements, the immediate is used twice so make
14632 // sure we handle that.
14633 for (unsigned i
= 0; i
!= NumElts
; ++i
)
14634 Indices
[i
] = ((Imm
>> (i
% 8)) & 0x1) ? NumElts
+ i
: i
;
14636 return Builder
.CreateShuffleVector(Ops
[0], Ops
[1],
14637 ArrayRef(Indices
, NumElts
), "blend");
14639 case X86::BI__builtin_ia32_pshuflw
:
14640 case X86::BI__builtin_ia32_pshuflw256
:
14641 case X86::BI__builtin_ia32_pshuflw512
: {
14642 uint32_t Imm
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue();
14643 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14644 unsigned NumElts
= Ty
->getNumElements();
14646 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
14647 Imm
= (Imm
& 0xff) * 0x01010101;
14650 for (unsigned l
= 0; l
!= NumElts
; l
+= 8) {
14651 for (unsigned i
= 0; i
!= 4; ++i
) {
14652 Indices
[l
+ i
] = l
+ (Imm
& 3);
14655 for (unsigned i
= 4; i
!= 8; ++i
)
14656 Indices
[l
+ i
] = l
+ i
;
14659 return Builder
.CreateShuffleVector(Ops
[0], ArrayRef(Indices
, NumElts
),
14662 case X86::BI__builtin_ia32_pshufhw
:
14663 case X86::BI__builtin_ia32_pshufhw256
:
14664 case X86::BI__builtin_ia32_pshufhw512
: {
14665 uint32_t Imm
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue();
14666 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14667 unsigned NumElts
= Ty
->getNumElements();
14669 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
14670 Imm
= (Imm
& 0xff) * 0x01010101;
14673 for (unsigned l
= 0; l
!= NumElts
; l
+= 8) {
14674 for (unsigned i
= 0; i
!= 4; ++i
)
14675 Indices
[l
+ i
] = l
+ i
;
14676 for (unsigned i
= 4; i
!= 8; ++i
) {
14677 Indices
[l
+ i
] = l
+ 4 + (Imm
& 3);
14682 return Builder
.CreateShuffleVector(Ops
[0], ArrayRef(Indices
, NumElts
),
14685 case X86::BI__builtin_ia32_pshufd
:
14686 case X86::BI__builtin_ia32_pshufd256
:
14687 case X86::BI__builtin_ia32_pshufd512
:
14688 case X86::BI__builtin_ia32_vpermilpd
:
14689 case X86::BI__builtin_ia32_vpermilps
:
14690 case X86::BI__builtin_ia32_vpermilpd256
:
14691 case X86::BI__builtin_ia32_vpermilps256
:
14692 case X86::BI__builtin_ia32_vpermilpd512
:
14693 case X86::BI__builtin_ia32_vpermilps512
: {
14694 uint32_t Imm
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue();
14695 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14696 unsigned NumElts
= Ty
->getNumElements();
14697 unsigned NumLanes
= Ty
->getPrimitiveSizeInBits() / 128;
14698 unsigned NumLaneElts
= NumElts
/ NumLanes
;
14700 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
14701 Imm
= (Imm
& 0xff) * 0x01010101;
14704 for (unsigned l
= 0; l
!= NumElts
; l
+= NumLaneElts
) {
14705 for (unsigned i
= 0; i
!= NumLaneElts
; ++i
) {
14706 Indices
[i
+ l
] = (Imm
% NumLaneElts
) + l
;
14707 Imm
/= NumLaneElts
;
14711 return Builder
.CreateShuffleVector(Ops
[0], ArrayRef(Indices
, NumElts
),
14714 case X86::BI__builtin_ia32_shufpd
:
14715 case X86::BI__builtin_ia32_shufpd256
:
14716 case X86::BI__builtin_ia32_shufpd512
:
14717 case X86::BI__builtin_ia32_shufps
:
14718 case X86::BI__builtin_ia32_shufps256
:
14719 case X86::BI__builtin_ia32_shufps512
: {
14720 uint32_t Imm
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue();
14721 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14722 unsigned NumElts
= Ty
->getNumElements();
14723 unsigned NumLanes
= Ty
->getPrimitiveSizeInBits() / 128;
14724 unsigned NumLaneElts
= NumElts
/ NumLanes
;
14726 // Splat the 8-bits of immediate 4 times to help the loop wrap around.
14727 Imm
= (Imm
& 0xff) * 0x01010101;
14730 for (unsigned l
= 0; l
!= NumElts
; l
+= NumLaneElts
) {
14731 for (unsigned i
= 0; i
!= NumLaneElts
; ++i
) {
14732 unsigned Index
= Imm
% NumLaneElts
;
14733 Imm
/= NumLaneElts
;
14734 if (i
>= (NumLaneElts
/ 2))
14736 Indices
[l
+ i
] = l
+ Index
;
14740 return Builder
.CreateShuffleVector(Ops
[0], Ops
[1],
14741 ArrayRef(Indices
, NumElts
), "shufp");
14743 case X86::BI__builtin_ia32_permdi256
:
14744 case X86::BI__builtin_ia32_permdf256
:
14745 case X86::BI__builtin_ia32_permdi512
:
14746 case X86::BI__builtin_ia32_permdf512
: {
14747 unsigned Imm
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue();
14748 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14749 unsigned NumElts
= Ty
->getNumElements();
14751 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
14753 for (unsigned l
= 0; l
!= NumElts
; l
+= 4)
14754 for (unsigned i
= 0; i
!= 4; ++i
)
14755 Indices
[l
+ i
] = l
+ ((Imm
>> (2 * i
)) & 0x3);
14757 return Builder
.CreateShuffleVector(Ops
[0], ArrayRef(Indices
, NumElts
),
14760 case X86::BI__builtin_ia32_palignr128
:
14761 case X86::BI__builtin_ia32_palignr256
:
14762 case X86::BI__builtin_ia32_palignr512
: {
14763 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue() & 0xff;
14766 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14767 assert(NumElts
% 16 == 0);
14769 // If palignr is shifting the pair of vectors more than the size of two
14770 // lanes, emit zero.
14771 if (ShiftVal
>= 32)
14772 return llvm::Constant::getNullValue(ConvertType(E
->getType()));
14774 // If palignr is shifting the pair of input vectors more than one lane,
14775 // but less than two lanes, convert to shifting in zeroes.
14776 if (ShiftVal
> 16) {
14779 Ops
[0] = llvm::Constant::getNullValue(Ops
[0]->getType());
14783 // 256-bit palignr operates on 128-bit lanes so we need to handle that
14784 for (unsigned l
= 0; l
!= NumElts
; l
+= 16) {
14785 for (unsigned i
= 0; i
!= 16; ++i
) {
14786 unsigned Idx
= ShiftVal
+ i
;
14788 Idx
+= NumElts
- 16; // End of lane, switch operand.
14789 Indices
[l
+ i
] = Idx
+ l
;
14793 return Builder
.CreateShuffleVector(Ops
[1], Ops
[0],
14794 ArrayRef(Indices
, NumElts
), "palignr");
14796 case X86::BI__builtin_ia32_alignd128
:
14797 case X86::BI__builtin_ia32_alignd256
:
14798 case X86::BI__builtin_ia32_alignd512
:
14799 case X86::BI__builtin_ia32_alignq128
:
14800 case X86::BI__builtin_ia32_alignq256
:
14801 case X86::BI__builtin_ia32_alignq512
: {
14803 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14804 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue() & 0xff;
14806 // Mask the shift amount to width of a vector.
14807 ShiftVal
&= NumElts
- 1;
14810 for (unsigned i
= 0; i
!= NumElts
; ++i
)
14811 Indices
[i
] = i
+ ShiftVal
;
14813 return Builder
.CreateShuffleVector(Ops
[1], Ops
[0],
14814 ArrayRef(Indices
, NumElts
), "valign");
14816 case X86::BI__builtin_ia32_shuf_f32x4_256
:
14817 case X86::BI__builtin_ia32_shuf_f64x2_256
:
14818 case X86::BI__builtin_ia32_shuf_i32x4_256
:
14819 case X86::BI__builtin_ia32_shuf_i64x2_256
:
14820 case X86::BI__builtin_ia32_shuf_f32x4
:
14821 case X86::BI__builtin_ia32_shuf_f64x2
:
14822 case X86::BI__builtin_ia32_shuf_i32x4
:
14823 case X86::BI__builtin_ia32_shuf_i64x2
: {
14824 unsigned Imm
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue();
14825 auto *Ty
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14826 unsigned NumElts
= Ty
->getNumElements();
14827 unsigned NumLanes
= Ty
->getPrimitiveSizeInBits() == 512 ? 4 : 2;
14828 unsigned NumLaneElts
= NumElts
/ NumLanes
;
14831 for (unsigned l
= 0; l
!= NumElts
; l
+= NumLaneElts
) {
14832 unsigned Index
= (Imm
% NumLanes
) * NumLaneElts
;
14833 Imm
/= NumLanes
; // Discard the bits we just used.
14834 if (l
>= (NumElts
/ 2))
14835 Index
+= NumElts
; // Switch to other source.
14836 for (unsigned i
= 0; i
!= NumLaneElts
; ++i
) {
14837 Indices
[l
+ i
] = Index
+ i
;
14841 return Builder
.CreateShuffleVector(Ops
[0], Ops
[1],
14842 ArrayRef(Indices
, NumElts
), "shuf");
14845 case X86::BI__builtin_ia32_vperm2f128_pd256
:
14846 case X86::BI__builtin_ia32_vperm2f128_ps256
:
14847 case X86::BI__builtin_ia32_vperm2f128_si256
:
14848 case X86::BI__builtin_ia32_permti256
: {
14849 unsigned Imm
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue();
14851 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
14853 // This takes a very simple approach since there are two lanes and a
14854 // shuffle can have 2 inputs. So we reserve the first input for the first
14855 // lane and the second input for the second lane. This may result in
14856 // duplicate sources, but this can be dealt with in the backend.
14860 for (unsigned l
= 0; l
!= 2; ++l
) {
14861 // Determine the source for this lane.
14862 if (Imm
& (1 << ((l
* 4) + 3)))
14863 OutOps
[l
] = llvm::ConstantAggregateZero::get(Ops
[0]->getType());
14864 else if (Imm
& (1 << ((l
* 4) + 1)))
14865 OutOps
[l
] = Ops
[1];
14867 OutOps
[l
] = Ops
[0];
14869 for (unsigned i
= 0; i
!= NumElts
/2; ++i
) {
14870 // Start with ith element of the source for this lane.
14871 unsigned Idx
= (l
* NumElts
) + i
;
        // If bit 0 of the immediate half is set, switch to the high half of
        // the source.
14874 if (Imm
& (1 << (l
* 4)))
14876 Indices
[(l
* (NumElts
/2)) + i
] = Idx
;
14880 return Builder
.CreateShuffleVector(OutOps
[0], OutOps
[1],
14881 ArrayRef(Indices
, NumElts
), "vperm");
14884 case X86::BI__builtin_ia32_pslldqi128_byteshift
:
14885 case X86::BI__builtin_ia32_pslldqi256_byteshift
:
14886 case X86::BI__builtin_ia32_pslldqi512_byteshift
: {
14887 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue() & 0xff;
14888 auto *ResultType
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14889 // Builtin type is vXi64 so multiply by 8 to get bytes.
14890 unsigned NumElts
= ResultType
->getNumElements() * 8;
14892 // If pslldq is shifting the vector more than 15 bytes, emit zero.
14893 if (ShiftVal
>= 16)
14894 return llvm::Constant::getNullValue(ResultType
);
14897 // 256/512-bit pslldq operates on 128-bit lanes so we need to handle that
14898 for (unsigned l
= 0; l
!= NumElts
; l
+= 16) {
14899 for (unsigned i
= 0; i
!= 16; ++i
) {
14900 unsigned Idx
= NumElts
+ i
- ShiftVal
;
14901 if (Idx
< NumElts
) Idx
-= NumElts
- 16; // end of lane, switch operand.
14902 Indices
[l
+ i
] = Idx
+ l
;
14906 auto *VecTy
= llvm::FixedVectorType::get(Int8Ty
, NumElts
);
14907 Value
*Cast
= Builder
.CreateBitCast(Ops
[0], VecTy
, "cast");
14908 Value
*Zero
= llvm::Constant::getNullValue(VecTy
);
14909 Value
*SV
= Builder
.CreateShuffleVector(
14910 Zero
, Cast
, ArrayRef(Indices
, NumElts
), "pslldq");
14911 return Builder
.CreateBitCast(SV
, Ops
[0]->getType(), "cast");
14913 case X86::BI__builtin_ia32_psrldqi128_byteshift
:
14914 case X86::BI__builtin_ia32_psrldqi256_byteshift
:
14915 case X86::BI__builtin_ia32_psrldqi512_byteshift
: {
14916 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue() & 0xff;
14917 auto *ResultType
= cast
<llvm::FixedVectorType
>(Ops
[0]->getType());
14918 // Builtin type is vXi64 so multiply by 8 to get bytes.
14919 unsigned NumElts
= ResultType
->getNumElements() * 8;
14921 // If psrldq is shifting the vector more than 15 bytes, emit zero.
14922 if (ShiftVal
>= 16)
14923 return llvm::Constant::getNullValue(ResultType
);
14926 // 256/512-bit psrldq operates on 128-bit lanes so we need to handle that
14927 for (unsigned l
= 0; l
!= NumElts
; l
+= 16) {
14928 for (unsigned i
= 0; i
!= 16; ++i
) {
14929 unsigned Idx
= i
+ ShiftVal
;
14930 if (Idx
>= 16) Idx
+= NumElts
- 16; // end of lane, switch operand.
14931 Indices
[l
+ i
] = Idx
+ l
;
14935 auto *VecTy
= llvm::FixedVectorType::get(Int8Ty
, NumElts
);
14936 Value
*Cast
= Builder
.CreateBitCast(Ops
[0], VecTy
, "cast");
14937 Value
*Zero
= llvm::Constant::getNullValue(VecTy
);
14938 Value
*SV
= Builder
.CreateShuffleVector(
14939 Cast
, Zero
, ArrayRef(Indices
, NumElts
), "psrldq");
14940 return Builder
.CreateBitCast(SV
, ResultType
, "cast");
14942 case X86::BI__builtin_ia32_kshiftliqi
:
14943 case X86::BI__builtin_ia32_kshiftlihi
:
14944 case X86::BI__builtin_ia32_kshiftlisi
:
14945 case X86::BI__builtin_ia32_kshiftlidi
: {
14946 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue() & 0xff;
14947 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
14949 if (ShiftVal
>= NumElts
)
14950 return llvm::Constant::getNullValue(Ops
[0]->getType());
14952 Value
*In
= getMaskVecValue(*this, Ops
[0], NumElts
);
14955 for (unsigned i
= 0; i
!= NumElts
; ++i
)
14956 Indices
[i
] = NumElts
+ i
- ShiftVal
;
14958 Value
*Zero
= llvm::Constant::getNullValue(In
->getType());
14959 Value
*SV
= Builder
.CreateShuffleVector(
14960 Zero
, In
, ArrayRef(Indices
, NumElts
), "kshiftl");
14961 return Builder
.CreateBitCast(SV
, Ops
[0]->getType());
14963 case X86::BI__builtin_ia32_kshiftriqi
:
14964 case X86::BI__builtin_ia32_kshiftrihi
:
14965 case X86::BI__builtin_ia32_kshiftrisi
:
14966 case X86::BI__builtin_ia32_kshiftridi
: {
14967 unsigned ShiftVal
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue() & 0xff;
14968 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
14970 if (ShiftVal
>= NumElts
)
14971 return llvm::Constant::getNullValue(Ops
[0]->getType());
14973 Value
*In
= getMaskVecValue(*this, Ops
[0], NumElts
);
14976 for (unsigned i
= 0; i
!= NumElts
; ++i
)
14977 Indices
[i
] = i
+ ShiftVal
;
14979 Value
*Zero
= llvm::Constant::getNullValue(In
->getType());
14980 Value
*SV
= Builder
.CreateShuffleVector(
14981 In
, Zero
, ArrayRef(Indices
, NumElts
), "kshiftr");
14982 return Builder
.CreateBitCast(SV
, Ops
[0]->getType());
14984 case X86::BI__builtin_ia32_movnti
:
14985 case X86::BI__builtin_ia32_movnti64
:
14986 case X86::BI__builtin_ia32_movntsd
:
14987 case X86::BI__builtin_ia32_movntss
: {
14988 llvm::MDNode
*Node
= llvm::MDNode::get(
14989 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder
.getInt32(1)));
14991 Value
*Ptr
= Ops
[0];
14992 Value
*Src
= Ops
[1];
14994 // Extract the 0'th element of the source vector.
14995 if (BuiltinID
== X86::BI__builtin_ia32_movntsd
||
14996 BuiltinID
== X86::BI__builtin_ia32_movntss
)
14997 Src
= Builder
.CreateExtractElement(Src
, (uint64_t)0, "extract");
14999 // Unaligned nontemporal store of the scalar value.
15000 StoreInst
*SI
= Builder
.CreateDefaultAlignedStore(Src
, Ptr
);
15001 SI
->setMetadata(llvm::LLVMContext::MD_nontemporal
, Node
);
15002 SI
->setAlignment(llvm::Align(1));
15005 // Rotate is a special case of funnel shift - 1st 2 args are the same.
15006 case X86::BI__builtin_ia32_vprotb
:
15007 case X86::BI__builtin_ia32_vprotw
:
15008 case X86::BI__builtin_ia32_vprotd
:
15009 case X86::BI__builtin_ia32_vprotq
:
15010 case X86::BI__builtin_ia32_vprotbi
:
15011 case X86::BI__builtin_ia32_vprotwi
:
15012 case X86::BI__builtin_ia32_vprotdi
:
15013 case X86::BI__builtin_ia32_vprotqi
:
15014 case X86::BI__builtin_ia32_prold128
:
15015 case X86::BI__builtin_ia32_prold256
:
15016 case X86::BI__builtin_ia32_prold512
:
15017 case X86::BI__builtin_ia32_prolq128
:
15018 case X86::BI__builtin_ia32_prolq256
:
15019 case X86::BI__builtin_ia32_prolq512
:
15020 case X86::BI__builtin_ia32_prolvd128
:
15021 case X86::BI__builtin_ia32_prolvd256
:
15022 case X86::BI__builtin_ia32_prolvd512
:
15023 case X86::BI__builtin_ia32_prolvq128
:
15024 case X86::BI__builtin_ia32_prolvq256
:
15025 case X86::BI__builtin_ia32_prolvq512
:
15026 return EmitX86FunnelShift(*this, Ops
[0], Ops
[0], Ops
[1], false);
15027 case X86::BI__builtin_ia32_prord128
:
15028 case X86::BI__builtin_ia32_prord256
:
15029 case X86::BI__builtin_ia32_prord512
:
15030 case X86::BI__builtin_ia32_prorq128
:
15031 case X86::BI__builtin_ia32_prorq256
:
15032 case X86::BI__builtin_ia32_prorq512
:
15033 case X86::BI__builtin_ia32_prorvd128
:
15034 case X86::BI__builtin_ia32_prorvd256
:
15035 case X86::BI__builtin_ia32_prorvd512
:
15036 case X86::BI__builtin_ia32_prorvq128
:
15037 case X86::BI__builtin_ia32_prorvq256
:
15038 case X86::BI__builtin_ia32_prorvq512
:
15039 return EmitX86FunnelShift(*this, Ops
[0], Ops
[0], Ops
[1], true);
15040 case X86::BI__builtin_ia32_selectb_128
:
15041 case X86::BI__builtin_ia32_selectb_256
:
15042 case X86::BI__builtin_ia32_selectb_512
:
15043 case X86::BI__builtin_ia32_selectw_128
:
15044 case X86::BI__builtin_ia32_selectw_256
:
15045 case X86::BI__builtin_ia32_selectw_512
:
15046 case X86::BI__builtin_ia32_selectd_128
:
15047 case X86::BI__builtin_ia32_selectd_256
:
15048 case X86::BI__builtin_ia32_selectd_512
:
15049 case X86::BI__builtin_ia32_selectq_128
:
15050 case X86::BI__builtin_ia32_selectq_256
:
15051 case X86::BI__builtin_ia32_selectq_512
:
15052 case X86::BI__builtin_ia32_selectph_128
:
15053 case X86::BI__builtin_ia32_selectph_256
:
15054 case X86::BI__builtin_ia32_selectph_512
:
15055 case X86::BI__builtin_ia32_selectpbf_128
:
15056 case X86::BI__builtin_ia32_selectpbf_256
:
15057 case X86::BI__builtin_ia32_selectpbf_512
:
15058 case X86::BI__builtin_ia32_selectps_128
:
15059 case X86::BI__builtin_ia32_selectps_256
:
15060 case X86::BI__builtin_ia32_selectps_512
:
15061 case X86::BI__builtin_ia32_selectpd_128
:
15062 case X86::BI__builtin_ia32_selectpd_256
:
15063 case X86::BI__builtin_ia32_selectpd_512
:
15064 return EmitX86Select(*this, Ops
[0], Ops
[1], Ops
[2]);
15065 case X86::BI__builtin_ia32_selectsh_128
:
15066 case X86::BI__builtin_ia32_selectsbf_128
:
15067 case X86::BI__builtin_ia32_selectss_128
:
15068 case X86::BI__builtin_ia32_selectsd_128
: {
15069 Value
*A
= Builder
.CreateExtractElement(Ops
[1], (uint64_t)0);
15070 Value
*B
= Builder
.CreateExtractElement(Ops
[2], (uint64_t)0);
15071 A
= EmitX86ScalarSelect(*this, Ops
[0], A
, B
);
15072 return Builder
.CreateInsertElement(Ops
[1], A
, (uint64_t)0);
15074 case X86::BI__builtin_ia32_cmpb128_mask
:
15075 case X86::BI__builtin_ia32_cmpb256_mask
:
15076 case X86::BI__builtin_ia32_cmpb512_mask
:
15077 case X86::BI__builtin_ia32_cmpw128_mask
:
15078 case X86::BI__builtin_ia32_cmpw256_mask
:
15079 case X86::BI__builtin_ia32_cmpw512_mask
:
15080 case X86::BI__builtin_ia32_cmpd128_mask
:
15081 case X86::BI__builtin_ia32_cmpd256_mask
:
15082 case X86::BI__builtin_ia32_cmpd512_mask
:
15083 case X86::BI__builtin_ia32_cmpq128_mask
:
15084 case X86::BI__builtin_ia32_cmpq256_mask
:
15085 case X86::BI__builtin_ia32_cmpq512_mask
: {
15086 unsigned CC
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue() & 0x7;
15087 return EmitX86MaskedCompare(*this, CC
, true, Ops
);
15089 case X86::BI__builtin_ia32_ucmpb128_mask
:
15090 case X86::BI__builtin_ia32_ucmpb256_mask
:
15091 case X86::BI__builtin_ia32_ucmpb512_mask
:
15092 case X86::BI__builtin_ia32_ucmpw128_mask
:
15093 case X86::BI__builtin_ia32_ucmpw256_mask
:
15094 case X86::BI__builtin_ia32_ucmpw512_mask
:
15095 case X86::BI__builtin_ia32_ucmpd128_mask
:
15096 case X86::BI__builtin_ia32_ucmpd256_mask
:
15097 case X86::BI__builtin_ia32_ucmpd512_mask
:
15098 case X86::BI__builtin_ia32_ucmpq128_mask
:
15099 case X86::BI__builtin_ia32_ucmpq256_mask
:
15100 case X86::BI__builtin_ia32_ucmpq512_mask
: {
15101 unsigned CC
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue() & 0x7;
15102 return EmitX86MaskedCompare(*this, CC
, false, Ops
);
15104 case X86::BI__builtin_ia32_vpcomb
:
15105 case X86::BI__builtin_ia32_vpcomw
:
15106 case X86::BI__builtin_ia32_vpcomd
:
15107 case X86::BI__builtin_ia32_vpcomq
:
15108 return EmitX86vpcom(*this, Ops
, true);
15109 case X86::BI__builtin_ia32_vpcomub
:
15110 case X86::BI__builtin_ia32_vpcomuw
:
15111 case X86::BI__builtin_ia32_vpcomud
:
15112 case X86::BI__builtin_ia32_vpcomuq
:
15113 return EmitX86vpcom(*this, Ops
, false);
15115 case X86::BI__builtin_ia32_kortestcqi
:
15116 case X86::BI__builtin_ia32_kortestchi
:
15117 case X86::BI__builtin_ia32_kortestcsi
:
15118 case X86::BI__builtin_ia32_kortestcdi
: {
15119 Value
*Or
= EmitX86MaskLogic(*this, Instruction::Or
, Ops
);
15120 Value
*C
= llvm::Constant::getAllOnesValue(Ops
[0]->getType());
15121 Value
*Cmp
= Builder
.CreateICmpEQ(Or
, C
);
15122 return Builder
.CreateZExt(Cmp
, ConvertType(E
->getType()));
15124 case X86::BI__builtin_ia32_kortestzqi
:
15125 case X86::BI__builtin_ia32_kortestzhi
:
15126 case X86::BI__builtin_ia32_kortestzsi
:
15127 case X86::BI__builtin_ia32_kortestzdi
: {
15128 Value
*Or
= EmitX86MaskLogic(*this, Instruction::Or
, Ops
);
15129 Value
*C
= llvm::Constant::getNullValue(Ops
[0]->getType());
15130 Value
*Cmp
= Builder
.CreateICmpEQ(Or
, C
);
15131 return Builder
.CreateZExt(Cmp
, ConvertType(E
->getType()));
15134 case X86::BI__builtin_ia32_ktestcqi
:
15135 case X86::BI__builtin_ia32_ktestzqi
:
15136 case X86::BI__builtin_ia32_ktestchi
:
15137 case X86::BI__builtin_ia32_ktestzhi
:
15138 case X86::BI__builtin_ia32_ktestcsi
:
15139 case X86::BI__builtin_ia32_ktestzsi
:
15140 case X86::BI__builtin_ia32_ktestcdi
:
15141 case X86::BI__builtin_ia32_ktestzdi
: {
15143 switch (BuiltinID
) {
15144 default: llvm_unreachable("Unsupported intrinsic!");
15145 case X86::BI__builtin_ia32_ktestcqi
:
15146 IID
= Intrinsic::x86_avx512_ktestc_b
;
15148 case X86::BI__builtin_ia32_ktestzqi
:
15149 IID
= Intrinsic::x86_avx512_ktestz_b
;
15151 case X86::BI__builtin_ia32_ktestchi
:
15152 IID
= Intrinsic::x86_avx512_ktestc_w
;
15154 case X86::BI__builtin_ia32_ktestzhi
:
15155 IID
= Intrinsic::x86_avx512_ktestz_w
;
15157 case X86::BI__builtin_ia32_ktestcsi
:
15158 IID
= Intrinsic::x86_avx512_ktestc_d
;
15160 case X86::BI__builtin_ia32_ktestzsi
:
15161 IID
= Intrinsic::x86_avx512_ktestz_d
;
15163 case X86::BI__builtin_ia32_ktestcdi
:
15164 IID
= Intrinsic::x86_avx512_ktestc_q
;
15166 case X86::BI__builtin_ia32_ktestzdi
:
15167 IID
= Intrinsic::x86_avx512_ktestz_q
;
15171 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
15172 Value
*LHS
= getMaskVecValue(*this, Ops
[0], NumElts
);
15173 Value
*RHS
= getMaskVecValue(*this, Ops
[1], NumElts
);
15174 Function
*Intr
= CGM
.getIntrinsic(IID
);
15175 return Builder
.CreateCall(Intr
, {LHS
, RHS
});
15178 case X86::BI__builtin_ia32_kaddqi
:
15179 case X86::BI__builtin_ia32_kaddhi
:
15180 case X86::BI__builtin_ia32_kaddsi
:
15181 case X86::BI__builtin_ia32_kadddi
: {
15183 switch (BuiltinID
) {
15184 default: llvm_unreachable("Unsupported intrinsic!");
15185 case X86::BI__builtin_ia32_kaddqi
:
15186 IID
= Intrinsic::x86_avx512_kadd_b
;
15188 case X86::BI__builtin_ia32_kaddhi
:
15189 IID
= Intrinsic::x86_avx512_kadd_w
;
15191 case X86::BI__builtin_ia32_kaddsi
:
15192 IID
= Intrinsic::x86_avx512_kadd_d
;
15194 case X86::BI__builtin_ia32_kadddi
:
15195 IID
= Intrinsic::x86_avx512_kadd_q
;
15199 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
15200 Value
*LHS
= getMaskVecValue(*this, Ops
[0], NumElts
);
15201 Value
*RHS
= getMaskVecValue(*this, Ops
[1], NumElts
);
15202 Function
*Intr
= CGM
.getIntrinsic(IID
);
15203 Value
*Res
= Builder
.CreateCall(Intr
, {LHS
, RHS
});
15204 return Builder
.CreateBitCast(Res
, Ops
[0]->getType());
15206 case X86::BI__builtin_ia32_kandqi
:
15207 case X86::BI__builtin_ia32_kandhi
:
15208 case X86::BI__builtin_ia32_kandsi
:
15209 case X86::BI__builtin_ia32_kanddi
:
15210 return EmitX86MaskLogic(*this, Instruction::And
, Ops
);
15211 case X86::BI__builtin_ia32_kandnqi
:
15212 case X86::BI__builtin_ia32_kandnhi
:
15213 case X86::BI__builtin_ia32_kandnsi
:
15214 case X86::BI__builtin_ia32_kandndi
:
15215 return EmitX86MaskLogic(*this, Instruction::And
, Ops
, true);
15216 case X86::BI__builtin_ia32_korqi
:
15217 case X86::BI__builtin_ia32_korhi
:
15218 case X86::BI__builtin_ia32_korsi
:
15219 case X86::BI__builtin_ia32_kordi
:
15220 return EmitX86MaskLogic(*this, Instruction::Or
, Ops
);
15221 case X86::BI__builtin_ia32_kxnorqi
:
15222 case X86::BI__builtin_ia32_kxnorhi
:
15223 case X86::BI__builtin_ia32_kxnorsi
:
15224 case X86::BI__builtin_ia32_kxnordi
:
15225 return EmitX86MaskLogic(*this, Instruction::Xor
, Ops
, true);
15226 case X86::BI__builtin_ia32_kxorqi
:
15227 case X86::BI__builtin_ia32_kxorhi
:
15228 case X86::BI__builtin_ia32_kxorsi
:
15229 case X86::BI__builtin_ia32_kxordi
:
15230 return EmitX86MaskLogic(*this, Instruction::Xor
, Ops
);
15231 case X86::BI__builtin_ia32_knotqi
:
15232 case X86::BI__builtin_ia32_knothi
:
15233 case X86::BI__builtin_ia32_knotsi
:
15234 case X86::BI__builtin_ia32_knotdi
: {
15235 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
15236 Value
*Res
= getMaskVecValue(*this, Ops
[0], NumElts
);
15237 return Builder
.CreateBitCast(Builder
.CreateNot(Res
),
15238 Ops
[0]->getType());
15240 case X86::BI__builtin_ia32_kmovb
:
15241 case X86::BI__builtin_ia32_kmovw
:
15242 case X86::BI__builtin_ia32_kmovd
:
15243 case X86::BI__builtin_ia32_kmovq
: {
15244 // Bitcast to vXi1 type and then back to integer. This gets the mask
15245 // register type into the IR, but might be optimized out depending on
15246 // what's around it.
15247 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
15248 Value
*Res
= getMaskVecValue(*this, Ops
[0], NumElts
);
15249 return Builder
.CreateBitCast(Res
, Ops
[0]->getType());
15252 case X86::BI__builtin_ia32_kunpckdi
:
15253 case X86::BI__builtin_ia32_kunpcksi
:
15254 case X86::BI__builtin_ia32_kunpckhi
: {
15255 unsigned NumElts
= Ops
[0]->getType()->getIntegerBitWidth();
15256 Value
*LHS
= getMaskVecValue(*this, Ops
[0], NumElts
);
15257 Value
*RHS
= getMaskVecValue(*this, Ops
[1], NumElts
);
15259 for (unsigned i
= 0; i
!= NumElts
; ++i
)
15262 // First extract half of each vector. This gives better codegen than
15263 // doing it in a single shuffle.
15264 LHS
= Builder
.CreateShuffleVector(LHS
, LHS
, ArrayRef(Indices
, NumElts
/ 2));
15265 RHS
= Builder
.CreateShuffleVector(RHS
, RHS
, ArrayRef(Indices
, NumElts
/ 2));
15266 // Concat the vectors.
15267 // NOTE: Operands are swapped to match the intrinsic definition.
15269 Builder
.CreateShuffleVector(RHS
, LHS
, ArrayRef(Indices
, NumElts
));
15270 return Builder
.CreateBitCast(Res
, Ops
[0]->getType());
15273 case X86::BI__builtin_ia32_vplzcntd_128
:
15274 case X86::BI__builtin_ia32_vplzcntd_256
:
15275 case X86::BI__builtin_ia32_vplzcntd_512
:
15276 case X86::BI__builtin_ia32_vplzcntq_128
:
15277 case X86::BI__builtin_ia32_vplzcntq_256
:
15278 case X86::BI__builtin_ia32_vplzcntq_512
: {
15279 Function
*F
= CGM
.getIntrinsic(Intrinsic::ctlz
, Ops
[0]->getType());
15280 return Builder
.CreateCall(F
, {Ops
[0],Builder
.getInt1(false)});
15282 case X86::BI__builtin_ia32_sqrtss
:
15283 case X86::BI__builtin_ia32_sqrtsd
: {
15284 Value
*A
= Builder
.CreateExtractElement(Ops
[0], (uint64_t)0);
15286 if (Builder
.getIsFPConstrained()) {
15287 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(*this, E
);
15288 F
= CGM
.getIntrinsic(Intrinsic::experimental_constrained_sqrt
,
15290 A
= Builder
.CreateConstrainedFPCall(F
, {A
});
15292 F
= CGM
.getIntrinsic(Intrinsic::sqrt
, A
->getType());
15293 A
= Builder
.CreateCall(F
, {A
});
15295 return Builder
.CreateInsertElement(Ops
[0], A
, (uint64_t)0);
15297 case X86::BI__builtin_ia32_sqrtsh_round_mask
:
15298 case X86::BI__builtin_ia32_sqrtsd_round_mask
:
15299 case X86::BI__builtin_ia32_sqrtss_round_mask
: {
15300 unsigned CC
= cast
<llvm::ConstantInt
>(Ops
[4])->getZExtValue();
15301 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
15302 // otherwise keep the intrinsic.
15306 switch (BuiltinID
) {
15308 llvm_unreachable("Unsupported intrinsic!");
15309 case X86::BI__builtin_ia32_sqrtsh_round_mask
:
15310 IID
= Intrinsic::x86_avx512fp16_mask_sqrt_sh
;
15312 case X86::BI__builtin_ia32_sqrtsd_round_mask
:
15313 IID
= Intrinsic::x86_avx512_mask_sqrt_sd
;
15315 case X86::BI__builtin_ia32_sqrtss_round_mask
:
15316 IID
= Intrinsic::x86_avx512_mask_sqrt_ss
;
15319 return Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
15321 Value
*A
= Builder
.CreateExtractElement(Ops
[1], (uint64_t)0);
15323 if (Builder
.getIsFPConstrained()) {
15324 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(*this, E
);
15325 F
= CGM
.getIntrinsic(Intrinsic::experimental_constrained_sqrt
,
15327 A
= Builder
.CreateConstrainedFPCall(F
, A
);
15329 F
= CGM
.getIntrinsic(Intrinsic::sqrt
, A
->getType());
15330 A
= Builder
.CreateCall(F
, A
);
15332 Value
*Src
= Builder
.CreateExtractElement(Ops
[2], (uint64_t)0);
15333 A
= EmitX86ScalarSelect(*this, Ops
[3], A
, Src
);
15334 return Builder
.CreateInsertElement(Ops
[0], A
, (uint64_t)0);
15336 case X86::BI__builtin_ia32_sqrtpd256
:
15337 case X86::BI__builtin_ia32_sqrtpd
:
15338 case X86::BI__builtin_ia32_sqrtps256
:
15339 case X86::BI__builtin_ia32_sqrtps
:
15340 case X86::BI__builtin_ia32_sqrtph256
:
15341 case X86::BI__builtin_ia32_sqrtph
:
15342 case X86::BI__builtin_ia32_sqrtph512
:
15343 case X86::BI__builtin_ia32_sqrtps512
:
15344 case X86::BI__builtin_ia32_sqrtpd512
: {
15345 if (Ops
.size() == 2) {
15346 unsigned CC
= cast
<llvm::ConstantInt
>(Ops
[1])->getZExtValue();
15347 // Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
15348 // otherwise keep the intrinsic.
15352 switch (BuiltinID
) {
15354 llvm_unreachable("Unsupported intrinsic!");
15355 case X86::BI__builtin_ia32_sqrtph512
:
15356 IID
= Intrinsic::x86_avx512fp16_sqrt_ph_512
;
15358 case X86::BI__builtin_ia32_sqrtps512
:
15359 IID
= Intrinsic::x86_avx512_sqrt_ps_512
;
15361 case X86::BI__builtin_ia32_sqrtpd512
:
15362 IID
= Intrinsic::x86_avx512_sqrt_pd_512
;
15365 return Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
15368 if (Builder
.getIsFPConstrained()) {
15369 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(*this, E
);
15370 Function
*F
= CGM
.getIntrinsic(Intrinsic::experimental_constrained_sqrt
,
15371 Ops
[0]->getType());
15372 return Builder
.CreateConstrainedFPCall(F
, Ops
[0]);
15374 Function
*F
= CGM
.getIntrinsic(Intrinsic::sqrt
, Ops
[0]->getType());
15375 return Builder
.CreateCall(F
, Ops
[0]);
15379 case X86::BI__builtin_ia32_pmuludq128
:
15380 case X86::BI__builtin_ia32_pmuludq256
:
15381 case X86::BI__builtin_ia32_pmuludq512
:
15382 return EmitX86Muldq(*this, /*IsSigned*/false, Ops
);
15384 case X86::BI__builtin_ia32_pmuldq128
:
15385 case X86::BI__builtin_ia32_pmuldq256
:
15386 case X86::BI__builtin_ia32_pmuldq512
:
15387 return EmitX86Muldq(*this, /*IsSigned*/true, Ops
);
15389 case X86::BI__builtin_ia32_pternlogd512_mask
:
15390 case X86::BI__builtin_ia32_pternlogq512_mask
:
15391 case X86::BI__builtin_ia32_pternlogd128_mask
:
15392 case X86::BI__builtin_ia32_pternlogd256_mask
:
15393 case X86::BI__builtin_ia32_pternlogq128_mask
:
15394 case X86::BI__builtin_ia32_pternlogq256_mask
:
15395 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops
);
15397 case X86::BI__builtin_ia32_pternlogd512_maskz
:
15398 case X86::BI__builtin_ia32_pternlogq512_maskz
:
15399 case X86::BI__builtin_ia32_pternlogd128_maskz
:
15400 case X86::BI__builtin_ia32_pternlogd256_maskz
:
15401 case X86::BI__builtin_ia32_pternlogq128_maskz
:
15402 case X86::BI__builtin_ia32_pternlogq256_maskz
:
15403 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops
);
15405 case X86::BI__builtin_ia32_vpshldd128
:
15406 case X86::BI__builtin_ia32_vpshldd256
:
15407 case X86::BI__builtin_ia32_vpshldd512
:
15408 case X86::BI__builtin_ia32_vpshldq128
:
15409 case X86::BI__builtin_ia32_vpshldq256
:
15410 case X86::BI__builtin_ia32_vpshldq512
:
15411 case X86::BI__builtin_ia32_vpshldw128
:
15412 case X86::BI__builtin_ia32_vpshldw256
:
15413 case X86::BI__builtin_ia32_vpshldw512
:
15414 return EmitX86FunnelShift(*this, Ops
[0], Ops
[1], Ops
[2], false);
15416 case X86::BI__builtin_ia32_vpshrdd128
:
15417 case X86::BI__builtin_ia32_vpshrdd256
:
15418 case X86::BI__builtin_ia32_vpshrdd512
:
15419 case X86::BI__builtin_ia32_vpshrdq128
:
15420 case X86::BI__builtin_ia32_vpshrdq256
:
15421 case X86::BI__builtin_ia32_vpshrdq512
:
15422 case X86::BI__builtin_ia32_vpshrdw128
:
15423 case X86::BI__builtin_ia32_vpshrdw256
:
15424 case X86::BI__builtin_ia32_vpshrdw512
:
15425 // Ops 0 and 1 are swapped.
15426 return EmitX86FunnelShift(*this, Ops
[1], Ops
[0], Ops
[2], true);
15428 case X86::BI__builtin_ia32_vpshldvd128
:
15429 case X86::BI__builtin_ia32_vpshldvd256
:
15430 case X86::BI__builtin_ia32_vpshldvd512
:
15431 case X86::BI__builtin_ia32_vpshldvq128
:
15432 case X86::BI__builtin_ia32_vpshldvq256
:
15433 case X86::BI__builtin_ia32_vpshldvq512
:
15434 case X86::BI__builtin_ia32_vpshldvw128
:
15435 case X86::BI__builtin_ia32_vpshldvw256
:
15436 case X86::BI__builtin_ia32_vpshldvw512
:
15437 return EmitX86FunnelShift(*this, Ops
[0], Ops
[1], Ops
[2], false);
15439 case X86::BI__builtin_ia32_vpshrdvd128
:
15440 case X86::BI__builtin_ia32_vpshrdvd256
:
15441 case X86::BI__builtin_ia32_vpshrdvd512
:
15442 case X86::BI__builtin_ia32_vpshrdvq128
:
15443 case X86::BI__builtin_ia32_vpshrdvq256
:
15444 case X86::BI__builtin_ia32_vpshrdvq512
:
15445 case X86::BI__builtin_ia32_vpshrdvw128
:
15446 case X86::BI__builtin_ia32_vpshrdvw256
:
15447 case X86::BI__builtin_ia32_vpshrdvw512
:
15448 // Ops 0 and 1 are swapped.
15449 return EmitX86FunnelShift(*this, Ops
[1], Ops
[0], Ops
[2], true);
15452 case X86::BI__builtin_ia32_reduce_fadd_pd512
:
15453 case X86::BI__builtin_ia32_reduce_fadd_ps512
:
15454 case X86::BI__builtin_ia32_reduce_fadd_ph512
:
15455 case X86::BI__builtin_ia32_reduce_fadd_ph256
:
15456 case X86::BI__builtin_ia32_reduce_fadd_ph128
: {
15458 CGM
.getIntrinsic(Intrinsic::vector_reduce_fadd
, Ops
[1]->getType());
15459 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
15460 Builder
.getFastMathFlags().setAllowReassoc();
15461 return Builder
.CreateCall(F
, {Ops
[0], Ops
[1]});
15463 case X86::BI__builtin_ia32_reduce_fmul_pd512
:
15464 case X86::BI__builtin_ia32_reduce_fmul_ps512
:
15465 case X86::BI__builtin_ia32_reduce_fmul_ph512
:
15466 case X86::BI__builtin_ia32_reduce_fmul_ph256
:
15467 case X86::BI__builtin_ia32_reduce_fmul_ph128
: {
15469 CGM
.getIntrinsic(Intrinsic::vector_reduce_fmul
, Ops
[1]->getType());
15470 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
15471 Builder
.getFastMathFlags().setAllowReassoc();
15472 return Builder
.CreateCall(F
, {Ops
[0], Ops
[1]});
15474 case X86::BI__builtin_ia32_reduce_fmax_pd512
:
15475 case X86::BI__builtin_ia32_reduce_fmax_ps512
:
15476 case X86::BI__builtin_ia32_reduce_fmax_ph512
:
15477 case X86::BI__builtin_ia32_reduce_fmax_ph256
:
15478 case X86::BI__builtin_ia32_reduce_fmax_ph128
: {
15480 CGM
.getIntrinsic(Intrinsic::vector_reduce_fmax
, Ops
[0]->getType());
15481 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
15482 Builder
.getFastMathFlags().setNoNaNs();
15483 return Builder
.CreateCall(F
, {Ops
[0]});
15485 case X86::BI__builtin_ia32_reduce_fmin_pd512
:
15486 case X86::BI__builtin_ia32_reduce_fmin_ps512
:
15487 case X86::BI__builtin_ia32_reduce_fmin_ph512
:
15488 case X86::BI__builtin_ia32_reduce_fmin_ph256
:
15489 case X86::BI__builtin_ia32_reduce_fmin_ph128
: {
15491 CGM
.getIntrinsic(Intrinsic::vector_reduce_fmin
, Ops
[0]->getType());
15492 IRBuilder
<>::FastMathFlagGuard
FMFGuard(Builder
);
15493 Builder
.getFastMathFlags().setNoNaNs();
15494 return Builder
.CreateCall(F
, {Ops
[0]});
15498 case X86::BI__builtin_ia32_pswapdsf
:
15499 case X86::BI__builtin_ia32_pswapdsi
: {
15500 llvm::Type
*MMXTy
= llvm::Type::getX86_MMXTy(getLLVMContext());
15501 Ops
[0] = Builder
.CreateBitCast(Ops
[0], MMXTy
, "cast");
15502 llvm::Function
*F
= CGM
.getIntrinsic(Intrinsic::x86_3dnowa_pswapd
);
15503 return Builder
.CreateCall(F
, Ops
, "pswapd");
15505 case X86::BI__builtin_ia32_rdrand16_step
:
15506 case X86::BI__builtin_ia32_rdrand32_step
:
15507 case X86::BI__builtin_ia32_rdrand64_step
:
15508 case X86::BI__builtin_ia32_rdseed16_step
:
15509 case X86::BI__builtin_ia32_rdseed32_step
:
15510 case X86::BI__builtin_ia32_rdseed64_step
: {
15512 switch (BuiltinID
) {
15513 default: llvm_unreachable("Unsupported intrinsic!");
15514 case X86::BI__builtin_ia32_rdrand16_step
:
15515 ID
= Intrinsic::x86_rdrand_16
;
15517 case X86::BI__builtin_ia32_rdrand32_step
:
15518 ID
= Intrinsic::x86_rdrand_32
;
15520 case X86::BI__builtin_ia32_rdrand64_step
:
15521 ID
= Intrinsic::x86_rdrand_64
;
15523 case X86::BI__builtin_ia32_rdseed16_step
:
15524 ID
= Intrinsic::x86_rdseed_16
;
15526 case X86::BI__builtin_ia32_rdseed32_step
:
15527 ID
= Intrinsic::x86_rdseed_32
;
15529 case X86::BI__builtin_ia32_rdseed64_step
:
15530 ID
= Intrinsic::x86_rdseed_64
;
15534 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(ID
));
15535 Builder
.CreateDefaultAlignedStore(Builder
.CreateExtractValue(Call
, 0),
15537 return Builder
.CreateExtractValue(Call
, 1);
15539 case X86::BI__builtin_ia32_addcarryx_u32
:
15540 case X86::BI__builtin_ia32_addcarryx_u64
:
15541 case X86::BI__builtin_ia32_subborrow_u32
:
15542 case X86::BI__builtin_ia32_subborrow_u64
: {
15544 switch (BuiltinID
) {
15545 default: llvm_unreachable("Unsupported intrinsic!");
15546 case X86::BI__builtin_ia32_addcarryx_u32
:
15547 IID
= Intrinsic::x86_addcarry_32
;
15549 case X86::BI__builtin_ia32_addcarryx_u64
:
15550 IID
= Intrinsic::x86_addcarry_64
;
15552 case X86::BI__builtin_ia32_subborrow_u32
:
15553 IID
= Intrinsic::x86_subborrow_32
;
15555 case X86::BI__builtin_ia32_subborrow_u64
:
15556 IID
= Intrinsic::x86_subborrow_64
;
15560 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
),
15561 { Ops
[0], Ops
[1], Ops
[2] });
15562 Builder
.CreateDefaultAlignedStore(Builder
.CreateExtractValue(Call
, 1),
15564 return Builder
.CreateExtractValue(Call
, 0);
15567 case X86::BI__builtin_ia32_fpclassps128_mask
:
15568 case X86::BI__builtin_ia32_fpclassps256_mask
:
15569 case X86::BI__builtin_ia32_fpclassps512_mask
:
15570 case X86::BI__builtin_ia32_fpclassph128_mask
:
15571 case X86::BI__builtin_ia32_fpclassph256_mask
:
15572 case X86::BI__builtin_ia32_fpclassph512_mask
:
15573 case X86::BI__builtin_ia32_fpclasspd128_mask
:
15574 case X86::BI__builtin_ia32_fpclasspd256_mask
:
15575 case X86::BI__builtin_ia32_fpclasspd512_mask
: {
15577 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
15578 Value
*MaskIn
= Ops
[2];
15579 Ops
.erase(&Ops
[2]);
15582 switch (BuiltinID
) {
15583 default: llvm_unreachable("Unsupported intrinsic!");
15584 case X86::BI__builtin_ia32_fpclassph128_mask
:
15585 ID
= Intrinsic::x86_avx512fp16_fpclass_ph_128
;
15587 case X86::BI__builtin_ia32_fpclassph256_mask
:
15588 ID
= Intrinsic::x86_avx512fp16_fpclass_ph_256
;
15590 case X86::BI__builtin_ia32_fpclassph512_mask
:
15591 ID
= Intrinsic::x86_avx512fp16_fpclass_ph_512
;
15593 case X86::BI__builtin_ia32_fpclassps128_mask
:
15594 ID
= Intrinsic::x86_avx512_fpclass_ps_128
;
15596 case X86::BI__builtin_ia32_fpclassps256_mask
:
15597 ID
= Intrinsic::x86_avx512_fpclass_ps_256
;
15599 case X86::BI__builtin_ia32_fpclassps512_mask
:
15600 ID
= Intrinsic::x86_avx512_fpclass_ps_512
;
15602 case X86::BI__builtin_ia32_fpclasspd128_mask
:
15603 ID
= Intrinsic::x86_avx512_fpclass_pd_128
;
15605 case X86::BI__builtin_ia32_fpclasspd256_mask
:
15606 ID
= Intrinsic::x86_avx512_fpclass_pd_256
;
15608 case X86::BI__builtin_ia32_fpclasspd512_mask
:
15609 ID
= Intrinsic::x86_avx512_fpclass_pd_512
;
15613 Value
*Fpclass
= Builder
.CreateCall(CGM
.getIntrinsic(ID
), Ops
);
15614 return EmitX86MaskedCompareResult(*this, Fpclass
, NumElts
, MaskIn
);
15617 case X86::BI__builtin_ia32_vp2intersect_q_512
:
15618 case X86::BI__builtin_ia32_vp2intersect_q_256
:
15619 case X86::BI__builtin_ia32_vp2intersect_q_128
:
15620 case X86::BI__builtin_ia32_vp2intersect_d_512
:
15621 case X86::BI__builtin_ia32_vp2intersect_d_256
:
15622 case X86::BI__builtin_ia32_vp2intersect_d_128
: {
15624 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
15627 switch (BuiltinID
) {
15628 default: llvm_unreachable("Unsupported intrinsic!");
15629 case X86::BI__builtin_ia32_vp2intersect_q_512
:
15630 ID
= Intrinsic::x86_avx512_vp2intersect_q_512
;
15632 case X86::BI__builtin_ia32_vp2intersect_q_256
:
15633 ID
= Intrinsic::x86_avx512_vp2intersect_q_256
;
15635 case X86::BI__builtin_ia32_vp2intersect_q_128
:
15636 ID
= Intrinsic::x86_avx512_vp2intersect_q_128
;
15638 case X86::BI__builtin_ia32_vp2intersect_d_512
:
15639 ID
= Intrinsic::x86_avx512_vp2intersect_d_512
;
15641 case X86::BI__builtin_ia32_vp2intersect_d_256
:
15642 ID
= Intrinsic::x86_avx512_vp2intersect_d_256
;
15644 case X86::BI__builtin_ia32_vp2intersect_d_128
:
15645 ID
= Intrinsic::x86_avx512_vp2intersect_d_128
;
15649 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(ID
), {Ops
[0], Ops
[1]});
15650 Value
*Result
= Builder
.CreateExtractValue(Call
, 0);
15651 Result
= EmitX86MaskedCompareResult(*this, Result
, NumElts
, nullptr);
15652 Builder
.CreateDefaultAlignedStore(Result
, Ops
[2]);
15654 Result
= Builder
.CreateExtractValue(Call
, 1);
15655 Result
= EmitX86MaskedCompareResult(*this, Result
, NumElts
, nullptr);
15656 return Builder
.CreateDefaultAlignedStore(Result
, Ops
[3]);
15659 case X86::BI__builtin_ia32_vpmultishiftqb128
:
15660 case X86::BI__builtin_ia32_vpmultishiftqb256
:
15661 case X86::BI__builtin_ia32_vpmultishiftqb512
: {
15663 switch (BuiltinID
) {
15664 default: llvm_unreachable("Unsupported intrinsic!");
15665 case X86::BI__builtin_ia32_vpmultishiftqb128
:
15666 ID
= Intrinsic::x86_avx512_pmultishift_qb_128
;
15668 case X86::BI__builtin_ia32_vpmultishiftqb256
:
15669 ID
= Intrinsic::x86_avx512_pmultishift_qb_256
;
15671 case X86::BI__builtin_ia32_vpmultishiftqb512
:
15672 ID
= Intrinsic::x86_avx512_pmultishift_qb_512
;
15676 return Builder
.CreateCall(CGM
.getIntrinsic(ID
), Ops
);
15679 case X86::BI__builtin_ia32_vpshufbitqmb128_mask
:
15680 case X86::BI__builtin_ia32_vpshufbitqmb256_mask
:
15681 case X86::BI__builtin_ia32_vpshufbitqmb512_mask
: {
15683 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
15684 Value
*MaskIn
= Ops
[2];
15685 Ops
.erase(&Ops
[2]);
15688 switch (BuiltinID
) {
15689 default: llvm_unreachable("Unsupported intrinsic!");
15690 case X86::BI__builtin_ia32_vpshufbitqmb128_mask
:
15691 ID
= Intrinsic::x86_avx512_vpshufbitqmb_128
;
15693 case X86::BI__builtin_ia32_vpshufbitqmb256_mask
:
15694 ID
= Intrinsic::x86_avx512_vpshufbitqmb_256
;
15696 case X86::BI__builtin_ia32_vpshufbitqmb512_mask
:
15697 ID
= Intrinsic::x86_avx512_vpshufbitqmb_512
;
15701 Value
*Shufbit
= Builder
.CreateCall(CGM
.getIntrinsic(ID
), Ops
);
15702 return EmitX86MaskedCompareResult(*this, Shufbit
, NumElts
, MaskIn
);
15705 // packed comparison intrinsics
15706 case X86::BI__builtin_ia32_cmpeqps
:
15707 case X86::BI__builtin_ia32_cmpeqpd
:
15708 return getVectorFCmpIR(CmpInst::FCMP_OEQ
, /*IsSignaling*/false);
15709 case X86::BI__builtin_ia32_cmpltps
:
15710 case X86::BI__builtin_ia32_cmpltpd
:
15711 return getVectorFCmpIR(CmpInst::FCMP_OLT
, /*IsSignaling*/true);
15712 case X86::BI__builtin_ia32_cmpleps
:
15713 case X86::BI__builtin_ia32_cmplepd
:
15714 return getVectorFCmpIR(CmpInst::FCMP_OLE
, /*IsSignaling*/true);
15715 case X86::BI__builtin_ia32_cmpunordps
:
15716 case X86::BI__builtin_ia32_cmpunordpd
:
15717 return getVectorFCmpIR(CmpInst::FCMP_UNO
, /*IsSignaling*/false);
15718 case X86::BI__builtin_ia32_cmpneqps
:
15719 case X86::BI__builtin_ia32_cmpneqpd
:
15720 return getVectorFCmpIR(CmpInst::FCMP_UNE
, /*IsSignaling*/false);
15721 case X86::BI__builtin_ia32_cmpnltps
:
15722 case X86::BI__builtin_ia32_cmpnltpd
:
15723 return getVectorFCmpIR(CmpInst::FCMP_UGE
, /*IsSignaling*/true);
15724 case X86::BI__builtin_ia32_cmpnleps
:
15725 case X86::BI__builtin_ia32_cmpnlepd
:
15726 return getVectorFCmpIR(CmpInst::FCMP_UGT
, /*IsSignaling*/true);
15727 case X86::BI__builtin_ia32_cmpordps
:
15728 case X86::BI__builtin_ia32_cmpordpd
:
15729 return getVectorFCmpIR(CmpInst::FCMP_ORD
, /*IsSignaling*/false);
15730 case X86::BI__builtin_ia32_cmpph128_mask
:
15731 case X86::BI__builtin_ia32_cmpph256_mask
:
15732 case X86::BI__builtin_ia32_cmpph512_mask
:
15733 case X86::BI__builtin_ia32_cmpps128_mask
:
15734 case X86::BI__builtin_ia32_cmpps256_mask
:
15735 case X86::BI__builtin_ia32_cmpps512_mask
:
15736 case X86::BI__builtin_ia32_cmppd128_mask
:
15737 case X86::BI__builtin_ia32_cmppd256_mask
:
15738 case X86::BI__builtin_ia32_cmppd512_mask
:
15741 case X86::BI__builtin_ia32_cmpps
:
15742 case X86::BI__builtin_ia32_cmpps256
:
15743 case X86::BI__builtin_ia32_cmppd
:
15744 case X86::BI__builtin_ia32_cmppd256
: {
15745 // Lowering vector comparisons to fcmp instructions, while
15746 // ignoring signalling behaviour requested
15747 // ignoring rounding mode requested
15748 // This is only possible if fp-model is not strict and FENV_ACCESS is off.
15750 // The third argument is the comparison condition, and integer in the
15752 unsigned CC
= cast
<llvm::ConstantInt
>(Ops
[2])->getZExtValue() & 0x1f;
15754 // Lowering to IR fcmp instruction.
15755 // Ignoring requested signaling behaviour,
15756 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
15757 FCmpInst::Predicate Pred
;
15759 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
15760 // behavior is inverted. We'll handle that after the switch.
15761 switch (CC
& 0xf) {
15762 case 0x00: Pred
= FCmpInst::FCMP_OEQ
; IsSignaling
= false; break;
15763 case 0x01: Pred
= FCmpInst::FCMP_OLT
; IsSignaling
= true; break;
15764 case 0x02: Pred
= FCmpInst::FCMP_OLE
; IsSignaling
= true; break;
15765 case 0x03: Pred
= FCmpInst::FCMP_UNO
; IsSignaling
= false; break;
15766 case 0x04: Pred
= FCmpInst::FCMP_UNE
; IsSignaling
= false; break;
15767 case 0x05: Pred
= FCmpInst::FCMP_UGE
; IsSignaling
= true; break;
15768 case 0x06: Pred
= FCmpInst::FCMP_UGT
; IsSignaling
= true; break;
15769 case 0x07: Pred
= FCmpInst::FCMP_ORD
; IsSignaling
= false; break;
15770 case 0x08: Pred
= FCmpInst::FCMP_UEQ
; IsSignaling
= false; break;
15771 case 0x09: Pred
= FCmpInst::FCMP_ULT
; IsSignaling
= true; break;
15772 case 0x0a: Pred
= FCmpInst::FCMP_ULE
; IsSignaling
= true; break;
15773 case 0x0b: Pred
= FCmpInst::FCMP_FALSE
; IsSignaling
= false; break;
15774 case 0x0c: Pred
= FCmpInst::FCMP_ONE
; IsSignaling
= false; break;
15775 case 0x0d: Pred
= FCmpInst::FCMP_OGE
; IsSignaling
= true; break;
15776 case 0x0e: Pred
= FCmpInst::FCMP_OGT
; IsSignaling
= true; break;
15777 case 0x0f: Pred
= FCmpInst::FCMP_TRUE
; IsSignaling
= false; break;
15778 default: llvm_unreachable("Unhandled CC");
15781 // Invert the signalling behavior for 16-31.
15783 IsSignaling
= !IsSignaling
;
15785 // If the predicate is true or false and we're using constrained intrinsics,
15786 // we don't have a compare intrinsic we can use. Just use the legacy X86
15787 // specific intrinsic.
15788 // If the intrinsic is mask enabled and we're using constrained intrinsics,
15789 // use the legacy X86 specific intrinsic.
15790 if (Builder
.getIsFPConstrained() &&
15791 (Pred
== FCmpInst::FCMP_TRUE
|| Pred
== FCmpInst::FCMP_FALSE
||
15795 switch (BuiltinID
) {
15796 default: llvm_unreachable("Unexpected builtin");
15797 case X86::BI__builtin_ia32_cmpps
:
15798 IID
= Intrinsic::x86_sse_cmp_ps
;
15800 case X86::BI__builtin_ia32_cmpps256
:
15801 IID
= Intrinsic::x86_avx_cmp_ps_256
;
15803 case X86::BI__builtin_ia32_cmppd
:
15804 IID
= Intrinsic::x86_sse2_cmp_pd
;
15806 case X86::BI__builtin_ia32_cmppd256
:
15807 IID
= Intrinsic::x86_avx_cmp_pd_256
;
15809 case X86::BI__builtin_ia32_cmpph128_mask
:
15810 IID
= Intrinsic::x86_avx512fp16_mask_cmp_ph_128
;
15812 case X86::BI__builtin_ia32_cmpph256_mask
:
15813 IID
= Intrinsic::x86_avx512fp16_mask_cmp_ph_256
;
15815 case X86::BI__builtin_ia32_cmpph512_mask
:
15816 IID
= Intrinsic::x86_avx512fp16_mask_cmp_ph_512
;
15818 case X86::BI__builtin_ia32_cmpps512_mask
:
15819 IID
= Intrinsic::x86_avx512_mask_cmp_ps_512
;
15821 case X86::BI__builtin_ia32_cmppd512_mask
:
15822 IID
= Intrinsic::x86_avx512_mask_cmp_pd_512
;
15824 case X86::BI__builtin_ia32_cmpps128_mask
:
15825 IID
= Intrinsic::x86_avx512_mask_cmp_ps_128
;
15827 case X86::BI__builtin_ia32_cmpps256_mask
:
15828 IID
= Intrinsic::x86_avx512_mask_cmp_ps_256
;
15830 case X86::BI__builtin_ia32_cmppd128_mask
:
15831 IID
= Intrinsic::x86_avx512_mask_cmp_pd_128
;
15833 case X86::BI__builtin_ia32_cmppd256_mask
:
15834 IID
= Intrinsic::x86_avx512_mask_cmp_pd_256
;
15838 Function
*Intr
= CGM
.getIntrinsic(IID
);
15841 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
15842 Ops
[3] = getMaskVecValue(*this, Ops
[3], NumElts
);
15843 Value
*Cmp
= Builder
.CreateCall(Intr
, Ops
);
15844 return EmitX86MaskedCompareResult(*this, Cmp
, NumElts
, nullptr);
15847 return Builder
.CreateCall(Intr
, Ops
);
15850 // Builtins without the _mask suffix return a vector of integers
15851 // of the same width as the input vectors
15853 // We ignore SAE if strict FP is disabled. We only keep precise
15854 // exception behavior under strict FP.
15855 // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
15856 // object will be required.
15858 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements();
15861 Cmp
= Builder
.CreateFCmpS(Pred
, Ops
[0], Ops
[1]);
15863 Cmp
= Builder
.CreateFCmp(Pred
, Ops
[0], Ops
[1]);
15864 return EmitX86MaskedCompareResult(*this, Cmp
, NumElts
, Ops
[3]);
15867 return getVectorFCmpIR(Pred
, IsSignaling
);
15870 // SSE scalar comparison intrinsics
15871 case X86::BI__builtin_ia32_cmpeqss
:
15872 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 0);
15873 case X86::BI__builtin_ia32_cmpltss
:
15874 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 1);
15875 case X86::BI__builtin_ia32_cmpless
:
15876 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 2);
15877 case X86::BI__builtin_ia32_cmpunordss
:
15878 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 3);
15879 case X86::BI__builtin_ia32_cmpneqss
:
15880 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 4);
15881 case X86::BI__builtin_ia32_cmpnltss
:
15882 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 5);
15883 case X86::BI__builtin_ia32_cmpnless
:
15884 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 6);
15885 case X86::BI__builtin_ia32_cmpordss
:
15886 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss
, 7);
15887 case X86::BI__builtin_ia32_cmpeqsd
:
15888 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 0);
15889 case X86::BI__builtin_ia32_cmpltsd
:
15890 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 1);
15891 case X86::BI__builtin_ia32_cmplesd
:
15892 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 2);
15893 case X86::BI__builtin_ia32_cmpunordsd
:
15894 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 3);
15895 case X86::BI__builtin_ia32_cmpneqsd
:
15896 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 4);
15897 case X86::BI__builtin_ia32_cmpnltsd
:
15898 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 5);
15899 case X86::BI__builtin_ia32_cmpnlesd
:
15900 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 6);
15901 case X86::BI__builtin_ia32_cmpordsd
:
15902 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd
, 7);
15904 // f16c half2float intrinsics
15905 case X86::BI__builtin_ia32_vcvtph2ps
:
15906 case X86::BI__builtin_ia32_vcvtph2ps256
:
15907 case X86::BI__builtin_ia32_vcvtph2ps_mask
:
15908 case X86::BI__builtin_ia32_vcvtph2ps256_mask
:
15909 case X86::BI__builtin_ia32_vcvtph2ps512_mask
: {
15910 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(*this, E
);
15911 return EmitX86CvtF16ToFloatExpr(*this, Ops
, ConvertType(E
->getType()));
15914 // AVX512 bf16 intrinsics
15915 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask
: {
15916 Ops
[2] = getMaskVecValue(
15918 cast
<llvm::FixedVectorType
>(Ops
[0]->getType())->getNumElements());
15919 Intrinsic::ID IID
= Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128
;
15920 return Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
15922 case X86::BI__builtin_ia32_cvtsbf162ss_32
:
15923 return Builder
.CreateFPExt(Ops
[0], Builder
.getFloatTy());
15925 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask
:
15926 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask
: {
15928 switch (BuiltinID
) {
15929 default: llvm_unreachable("Unsupported intrinsic!");
15930 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask
:
15931 IID
= Intrinsic::x86_avx512bf16_cvtneps2bf16_256
;
15933 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask
:
15934 IID
= Intrinsic::x86_avx512bf16_cvtneps2bf16_512
;
15937 Value
*Res
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
[0]);
15938 return EmitX86Select(*this, Ops
[2], Res
, Ops
[1]);
15941 case X86::BI__cpuid
:
15942 case X86::BI__cpuidex
: {
15943 Value
*FuncId
= EmitScalarExpr(E
->getArg(1));
15944 Value
*SubFuncId
= BuiltinID
== X86::BI__cpuidex
15945 ? EmitScalarExpr(E
->getArg(2))
15946 : llvm::ConstantInt::get(Int32Ty
, 0);
15948 llvm::StructType
*CpuidRetTy
=
15949 llvm::StructType::get(Int32Ty
, Int32Ty
, Int32Ty
, Int32Ty
);
15950 llvm::FunctionType
*FTy
=
15951 llvm::FunctionType::get(CpuidRetTy
, {Int32Ty
, Int32Ty
}, false);
15953 StringRef Asm
, Constraints
;
15954 if (getTarget().getTriple().getArch() == llvm::Triple::x86
) {
15956 Constraints
= "={ax},={bx},={cx},={dx},{ax},{cx}";
15958 // x86-64 uses %rbx as the base register, so preserve it.
15959 Asm
= "xchgq %rbx, ${1:q}\n"
15961 "xchgq %rbx, ${1:q}";
15962 Constraints
= "={ax},=r,={cx},={dx},0,2";
15965 llvm::InlineAsm
*IA
= llvm::InlineAsm::get(FTy
, Asm
, Constraints
,
15966 /*hasSideEffects=*/false);
15967 Value
*IACall
= Builder
.CreateCall(IA
, {FuncId
, SubFuncId
});
15968 Value
*BasePtr
= EmitScalarExpr(E
->getArg(0));
15969 Value
*Store
= nullptr;
15970 for (unsigned i
= 0; i
< 4; i
++) {
15971 Value
*Extracted
= Builder
.CreateExtractValue(IACall
, i
);
15972 Value
*StorePtr
= Builder
.CreateConstInBoundsGEP1_32(Int32Ty
, BasePtr
, i
);
15973 Store
= Builder
.CreateAlignedStore(Extracted
, StorePtr
, getIntAlign());
15976 // Return the last store instruction to signal that we have emitted the
15981 case X86::BI__emul
:
15982 case X86::BI__emulu
: {
15983 llvm::Type
*Int64Ty
= llvm::IntegerType::get(getLLVMContext(), 64);
15984 bool isSigned
= (BuiltinID
== X86::BI__emul
);
15985 Value
*LHS
= Builder
.CreateIntCast(Ops
[0], Int64Ty
, isSigned
);
15986 Value
*RHS
= Builder
.CreateIntCast(Ops
[1], Int64Ty
, isSigned
);
15987 return Builder
.CreateMul(LHS
, RHS
, "", !isSigned
, isSigned
);
15989 case X86::BI__mulh
:
15990 case X86::BI__umulh
:
15991 case X86::BI_mul128
:
15992 case X86::BI_umul128
: {
15993 llvm::Type
*ResType
= ConvertType(E
->getType());
15994 llvm::Type
*Int128Ty
= llvm::IntegerType::get(getLLVMContext(), 128);
15996 bool IsSigned
= (BuiltinID
== X86::BI__mulh
|| BuiltinID
== X86::BI_mul128
);
15997 Value
*LHS
= Builder
.CreateIntCast(Ops
[0], Int128Ty
, IsSigned
);
15998 Value
*RHS
= Builder
.CreateIntCast(Ops
[1], Int128Ty
, IsSigned
);
16000 Value
*MulResult
, *HigherBits
;
16002 MulResult
= Builder
.CreateNSWMul(LHS
, RHS
);
16003 HigherBits
= Builder
.CreateAShr(MulResult
, 64);
16005 MulResult
= Builder
.CreateNUWMul(LHS
, RHS
);
16006 HigherBits
= Builder
.CreateLShr(MulResult
, 64);
16008 HigherBits
= Builder
.CreateIntCast(HigherBits
, ResType
, IsSigned
);
16010 if (BuiltinID
== X86::BI__mulh
|| BuiltinID
== X86::BI__umulh
)
16013 Address HighBitsAddress
= EmitPointerWithAlignment(E
->getArg(2));
16014 Builder
.CreateStore(HigherBits
, HighBitsAddress
);
16015 return Builder
.CreateIntCast(MulResult
, ResType
, IsSigned
);
16018 case X86::BI__faststorefence
: {
16019 return Builder
.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent
,
16020 llvm::SyncScope::System
);
16022 case X86::BI__shiftleft128
:
16023 case X86::BI__shiftright128
: {
16024 llvm::Function
*F
= CGM
.getIntrinsic(
16025 BuiltinID
== X86::BI__shiftleft128
? Intrinsic::fshl
: Intrinsic::fshr
,
16027 // Flip low/high ops and zero-extend amount to matching type.
16028 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
16029 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
16030 std::swap(Ops
[0], Ops
[1]);
16031 Ops
[2] = Builder
.CreateZExt(Ops
[2], Int64Ty
);
16032 return Builder
.CreateCall(F
, Ops
);
16034 case X86::BI_ReadWriteBarrier
:
16035 case X86::BI_ReadBarrier
:
16036 case X86::BI_WriteBarrier
: {
16037 return Builder
.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent
,
16038 llvm::SyncScope::SingleThread
);
16041 case X86::BI_AddressOfReturnAddress
: {
16043 CGM
.getIntrinsic(Intrinsic::addressofreturnaddress
, AllocaInt8PtrTy
);
16044 return Builder
.CreateCall(F
);
16046 case X86::BI__stosb
: {
16047 // We treat __stosb as a volatile memset - it may not generate "rep stosb"
16048 // instruction, but it will create a memset that won't be optimized away.
16049 return Builder
.CreateMemSet(Ops
[0], Ops
[1], Ops
[2], Align(1), true);
16052 // llvm.trap makes a ud2a instruction on x86.
16053 return EmitTrapCall(Intrinsic::trap
);
16054 case X86::BI__int2c
: {
16055 // This syscall signals a driver assertion failure in x86 NT kernels.
16056 llvm::FunctionType
*FTy
= llvm::FunctionType::get(VoidTy
, false);
16057 llvm::InlineAsm
*IA
=
16058 llvm::InlineAsm::get(FTy
, "int $$0x2c", "", /*hasSideEffects=*/true);
16059 llvm::AttributeList NoReturnAttr
= llvm::AttributeList::get(
16060 getLLVMContext(), llvm::AttributeList::FunctionIndex
,
16061 llvm::Attribute::NoReturn
);
16062 llvm::CallInst
*CI
= Builder
.CreateCall(IA
);
16063 CI
->setAttributes(NoReturnAttr
);
16066 case X86::BI__readfsbyte
:
16067 case X86::BI__readfsword
:
16068 case X86::BI__readfsdword
:
16069 case X86::BI__readfsqword
: {
16070 llvm::Type
*IntTy
= ConvertType(E
->getType());
16071 Value
*Ptr
= Builder
.CreateIntToPtr(
16072 Ops
[0], llvm::PointerType::get(getLLVMContext(), 257));
16073 LoadInst
*Load
= Builder
.CreateAlignedLoad(
16074 IntTy
, Ptr
, getContext().getTypeAlignInChars(E
->getType()));
16075 Load
->setVolatile(true);
16078 case X86::BI__readgsbyte
:
16079 case X86::BI__readgsword
:
16080 case X86::BI__readgsdword
:
16081 case X86::BI__readgsqword
: {
16082 llvm::Type
*IntTy
= ConvertType(E
->getType());
16083 Value
*Ptr
= Builder
.CreateIntToPtr(
16084 Ops
[0], llvm::PointerType::get(getLLVMContext(), 256));
16085 LoadInst
*Load
= Builder
.CreateAlignedLoad(
16086 IntTy
, Ptr
, getContext().getTypeAlignInChars(E
->getType()));
16087 Load
->setVolatile(true);
16090 case X86::BI__builtin_ia32_encodekey128_u32
: {
16091 Intrinsic::ID IID
= Intrinsic::x86_encodekey128
;
16093 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), {Ops
[0], Ops
[1]});
16095 for (int i
= 0; i
< 3; ++i
) {
16096 Value
*Extract
= Builder
.CreateExtractValue(Call
, i
+ 1);
16097 Value
*Ptr
= Builder
.CreateConstGEP1_32(Int8Ty
, Ops
[2], i
* 16);
16098 Builder
.CreateAlignedStore(Extract
, Ptr
, Align(1));
16101 return Builder
.CreateExtractValue(Call
, 0);
16103 case X86::BI__builtin_ia32_encodekey256_u32
: {
16104 Intrinsic::ID IID
= Intrinsic::x86_encodekey256
;
16107 Builder
.CreateCall(CGM
.getIntrinsic(IID
), {Ops
[0], Ops
[1], Ops
[2]});
16109 for (int i
= 0; i
< 4; ++i
) {
16110 Value
*Extract
= Builder
.CreateExtractValue(Call
, i
+ 1);
16111 Value
*Ptr
= Builder
.CreateConstGEP1_32(Int8Ty
, Ops
[3], i
* 16);
16112 Builder
.CreateAlignedStore(Extract
, Ptr
, Align(1));
16115 return Builder
.CreateExtractValue(Call
, 0);
16117 case X86::BI__builtin_ia32_aesenc128kl_u8
:
16118 case X86::BI__builtin_ia32_aesdec128kl_u8
:
16119 case X86::BI__builtin_ia32_aesenc256kl_u8
:
16120 case X86::BI__builtin_ia32_aesdec256kl_u8
: {
16122 StringRef BlockName
;
16123 switch (BuiltinID
) {
16125 llvm_unreachable("Unexpected builtin");
16126 case X86::BI__builtin_ia32_aesenc128kl_u8
:
16127 IID
= Intrinsic::x86_aesenc128kl
;
16128 BlockName
= "aesenc128kl";
16130 case X86::BI__builtin_ia32_aesdec128kl_u8
:
16131 IID
= Intrinsic::x86_aesdec128kl
;
16132 BlockName
= "aesdec128kl";
16134 case X86::BI__builtin_ia32_aesenc256kl_u8
:
16135 IID
= Intrinsic::x86_aesenc256kl
;
16136 BlockName
= "aesenc256kl";
16138 case X86::BI__builtin_ia32_aesdec256kl_u8
:
16139 IID
= Intrinsic::x86_aesdec256kl
;
16140 BlockName
= "aesdec256kl";
16144 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), {Ops
[1], Ops
[2]});
16146 BasicBlock
*NoError
=
16147 createBasicBlock(BlockName
+ "_no_error", this->CurFn
);
16148 BasicBlock
*Error
= createBasicBlock(BlockName
+ "_error", this->CurFn
);
16149 BasicBlock
*End
= createBasicBlock(BlockName
+ "_end", this->CurFn
);
16151 Value
*Ret
= Builder
.CreateExtractValue(Call
, 0);
16152 Value
*Succ
= Builder
.CreateTrunc(Ret
, Builder
.getInt1Ty());
16153 Value
*Out
= Builder
.CreateExtractValue(Call
, 1);
16154 Builder
.CreateCondBr(Succ
, NoError
, Error
);
16156 Builder
.SetInsertPoint(NoError
);
16157 Builder
.CreateDefaultAlignedStore(Out
, Ops
[0]);
16158 Builder
.CreateBr(End
);
16160 Builder
.SetInsertPoint(Error
);
16161 Constant
*Zero
= llvm::Constant::getNullValue(Out
->getType());
16162 Builder
.CreateDefaultAlignedStore(Zero
, Ops
[0]);
16163 Builder
.CreateBr(End
);
16165 Builder
.SetInsertPoint(End
);
16166 return Builder
.CreateExtractValue(Call
, 0);
16168 case X86::BI__builtin_ia32_aesencwide128kl_u8
:
16169 case X86::BI__builtin_ia32_aesdecwide128kl_u8
:
16170 case X86::BI__builtin_ia32_aesencwide256kl_u8
:
16171 case X86::BI__builtin_ia32_aesdecwide256kl_u8
: {
16173 StringRef BlockName
;
16174 switch (BuiltinID
) {
16175 case X86::BI__builtin_ia32_aesencwide128kl_u8
:
16176 IID
= Intrinsic::x86_aesencwide128kl
;
16177 BlockName
= "aesencwide128kl";
16179 case X86::BI__builtin_ia32_aesdecwide128kl_u8
:
16180 IID
= Intrinsic::x86_aesdecwide128kl
;
16181 BlockName
= "aesdecwide128kl";
16183 case X86::BI__builtin_ia32_aesencwide256kl_u8
:
16184 IID
= Intrinsic::x86_aesencwide256kl
;
16185 BlockName
= "aesencwide256kl";
16187 case X86::BI__builtin_ia32_aesdecwide256kl_u8
:
16188 IID
= Intrinsic::x86_aesdecwide256kl
;
16189 BlockName
= "aesdecwide256kl";
16193 llvm::Type
*Ty
= FixedVectorType::get(Builder
.getInt64Ty(), 2);
16196 for (int i
= 0; i
!= 8; ++i
) {
16197 Value
*Ptr
= Builder
.CreateConstGEP1_32(Ty
, Ops
[1], i
);
16198 InOps
[i
+ 1] = Builder
.CreateAlignedLoad(Ty
, Ptr
, Align(16));
16201 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), InOps
);
16203 BasicBlock
*NoError
=
16204 createBasicBlock(BlockName
+ "_no_error", this->CurFn
);
16205 BasicBlock
*Error
= createBasicBlock(BlockName
+ "_error", this->CurFn
);
16206 BasicBlock
*End
= createBasicBlock(BlockName
+ "_end", this->CurFn
);
16208 Value
*Ret
= Builder
.CreateExtractValue(Call
, 0);
16209 Value
*Succ
= Builder
.CreateTrunc(Ret
, Builder
.getInt1Ty());
16210 Builder
.CreateCondBr(Succ
, NoError
, Error
);
16212 Builder
.SetInsertPoint(NoError
);
16213 for (int i
= 0; i
!= 8; ++i
) {
16214 Value
*Extract
= Builder
.CreateExtractValue(Call
, i
+ 1);
16215 Value
*Ptr
= Builder
.CreateConstGEP1_32(Extract
->getType(), Ops
[0], i
);
16216 Builder
.CreateAlignedStore(Extract
, Ptr
, Align(16));
16218 Builder
.CreateBr(End
);
16220 Builder
.SetInsertPoint(Error
);
16221 for (int i
= 0; i
!= 8; ++i
) {
16222 Value
*Out
= Builder
.CreateExtractValue(Call
, i
+ 1);
16223 Constant
*Zero
= llvm::Constant::getNullValue(Out
->getType());
16224 Value
*Ptr
= Builder
.CreateConstGEP1_32(Out
->getType(), Ops
[0], i
);
16225 Builder
.CreateAlignedStore(Zero
, Ptr
, Align(16));
16227 Builder
.CreateBr(End
);
16229 Builder
.SetInsertPoint(End
);
16230 return Builder
.CreateExtractValue(Call
, 0);
16232 case X86::BI__builtin_ia32_vfcmaddcph512_mask
:
16235 case X86::BI__builtin_ia32_vfmaddcph512_mask
: {
16236 Intrinsic::ID IID
= IsConjFMA
16237 ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
16238 : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512
;
16239 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
16240 return EmitX86Select(*this, Ops
[3], Call
, Ops
[0]);
16242 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask
:
16245 case X86::BI__builtin_ia32_vfmaddcsh_round_mask
: {
16246 Intrinsic::ID IID
= IsConjFMA
? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
16247 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh
;
16248 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
16249 Value
*And
= Builder
.CreateAnd(Ops
[3], llvm::ConstantInt::get(Int8Ty
, 1));
16250 return EmitX86Select(*this, And
, Call
, Ops
[0]);
16252 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3
:
16255 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3
: {
16256 Intrinsic::ID IID
= IsConjFMA
? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
16257 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh
;
16258 Value
*Call
= Builder
.CreateCall(CGM
.getIntrinsic(IID
), Ops
);
16259 static constexpr int Mask
[] = {0, 5, 6, 7};
16260 return Builder
.CreateShuffleVector(Call
, Ops
[2], Mask
);
16262 case X86::BI__builtin_ia32_prefetchi
:
16263 return Builder
.CreateCall(
16264 CGM
.getIntrinsic(Intrinsic::prefetch
, Ops
[0]->getType()),
16265 {Ops
[0], llvm::ConstantInt::get(Int32Ty
, 0), Ops
[1],
16266 llvm::ConstantInt::get(Int32Ty
, 0)});
16270 Value
*CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID
,
16271 const CallExpr
*E
) {
16272 // Do not emit the builtin arguments in the arguments of a function call,
16273 // because the evaluation order of function arguments is not specified in C++.
16274 // This is important when testing to ensure the arguments are emitted in the
16275 // same order every time. Eg:
16277 // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)),
16278 // EmitScalarExpr(E->getArg(1)), "swdiv");
16280 // Value *Op0 = EmitScalarExpr(E->getArg(0));
16281 // Value *Op1 = EmitScalarExpr(E->getArg(1));
16282 // return Builder.CreateFDiv(Op0, Op1, "swdiv")
16284 Intrinsic::ID ID
= Intrinsic::not_intrinsic
;
16286 switch (BuiltinID
) {
16287 default: return nullptr;
16289 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
16290 // call __builtin_readcyclecounter.
16291 case PPC::BI__builtin_ppc_get_timebase
:
16292 return Builder
.CreateCall(CGM
.getIntrinsic(Intrinsic::readcyclecounter
));
16294 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
16295 case PPC::BI__builtin_altivec_lvx
:
16296 case PPC::BI__builtin_altivec_lvxl
:
16297 case PPC::BI__builtin_altivec_lvebx
:
16298 case PPC::BI__builtin_altivec_lvehx
:
16299 case PPC::BI__builtin_altivec_lvewx
:
16300 case PPC::BI__builtin_altivec_lvsl
:
16301 case PPC::BI__builtin_altivec_lvsr
:
16302 case PPC::BI__builtin_vsx_lxvd2x
:
16303 case PPC::BI__builtin_vsx_lxvw4x
:
16304 case PPC::BI__builtin_vsx_lxvd2x_be
:
16305 case PPC::BI__builtin_vsx_lxvw4x_be
:
16306 case PPC::BI__builtin_vsx_lxvl
:
16307 case PPC::BI__builtin_vsx_lxvll
:
16309 SmallVector
<Value
*, 2> Ops
;
16310 Ops
.push_back(EmitScalarExpr(E
->getArg(0)));
16311 Ops
.push_back(EmitScalarExpr(E
->getArg(1)));
16312 if (!(BuiltinID
== PPC::BI__builtin_vsx_lxvl
||
16313 BuiltinID
== PPC::BI__builtin_vsx_lxvll
)) {
16314 Ops
[0] = Builder
.CreateGEP(Int8Ty
, Ops
[1], Ops
[0]);
16318 switch (BuiltinID
) {
16319 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
16320 case PPC::BI__builtin_altivec_lvx
:
16321 ID
= Intrinsic::ppc_altivec_lvx
;
16323 case PPC::BI__builtin_altivec_lvxl
:
16324 ID
= Intrinsic::ppc_altivec_lvxl
;
16326 case PPC::BI__builtin_altivec_lvebx
:
16327 ID
= Intrinsic::ppc_altivec_lvebx
;
16329 case PPC::BI__builtin_altivec_lvehx
:
16330 ID
= Intrinsic::ppc_altivec_lvehx
;
16332 case PPC::BI__builtin_altivec_lvewx
:
16333 ID
= Intrinsic::ppc_altivec_lvewx
;
16335 case PPC::BI__builtin_altivec_lvsl
:
16336 ID
= Intrinsic::ppc_altivec_lvsl
;
16338 case PPC::BI__builtin_altivec_lvsr
:
16339 ID
= Intrinsic::ppc_altivec_lvsr
;
16341 case PPC::BI__builtin_vsx_lxvd2x
:
16342 ID
= Intrinsic::ppc_vsx_lxvd2x
;
16344 case PPC::BI__builtin_vsx_lxvw4x
:
16345 ID
= Intrinsic::ppc_vsx_lxvw4x
;
16347 case PPC::BI__builtin_vsx_lxvd2x_be
:
16348 ID
= Intrinsic::ppc_vsx_lxvd2x_be
;
16350 case PPC::BI__builtin_vsx_lxvw4x_be
:
16351 ID
= Intrinsic::ppc_vsx_lxvw4x_be
;
16353 case PPC::BI__builtin_vsx_lxvl
:
16354 ID
= Intrinsic::ppc_vsx_lxvl
;
16356 case PPC::BI__builtin_vsx_lxvll
:
16357 ID
= Intrinsic::ppc_vsx_lxvll
;
16360 llvm::Function
*F
= CGM
.getIntrinsic(ID
);
16361 return Builder
.CreateCall(F
, Ops
, "");
  // vec_st, vec_xst_be
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  case PPC::BI__builtin_vsx_stxvd2x:
  case PPC::BI__builtin_vsx_stxvw4x:
  case PPC::BI__builtin_vsx_stxvd2x_be:
  case PPC::BI__builtin_vsx_stxvw4x_be:
  case PPC::BI__builtin_vsx_stxvl:
  case PPC::BI__builtin_vsx_stxvll:
  {
    SmallVector<Value *, 3> Ops;
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
          BuiltinID == PPC::BI__builtin_vsx_stxvll)) {
      Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
      Ops.pop_back();
    }

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    case PPC::BI__builtin_vsx_stxvd2x:
      ID = Intrinsic::ppc_vsx_stxvd2x;
      break;
    case PPC::BI__builtin_vsx_stxvw4x:
      ID = Intrinsic::ppc_vsx_stxvw4x;
      break;
    case PPC::BI__builtin_vsx_stxvd2x_be:
      ID = Intrinsic::ppc_vsx_stxvd2x_be;
      break;
    case PPC::BI__builtin_vsx_stxvw4x_be:
      ID = Intrinsic::ppc_vsx_stxvw4x_be;
      break;
    case PPC::BI__builtin_vsx_stxvl:
      ID = Intrinsic::ppc_vsx_stxvl;
      break;
    case PPC::BI__builtin_vsx_stxvll:
      ID = Intrinsic::ppc_vsx_stxvll;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  case PPC::BI__builtin_vsx_ldrmb: {
    // Essentially boils down to performing an unaligned VMX load sequence so
    // as to avoid crossing a page boundary and then shuffling the elements
    // into the right side of the vector register.
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
    llvm::Type *ResTy = ConvertType(E->getType());
    bool IsLE = getTarget().isLittleEndian();

    // If the user wants the entire vector, just load the entire vector.
    if (NumBytes == 16) {
      Value *LD =
          Builder.CreateLoad(Address(Op0, ResTy, CharUnits::fromQuantity(1)));
      if (!IsLE)
        return LD;

      // Reverse the bytes on LE.
      SmallVector<int, 16> RevMask;
      for (int Idx = 0; Idx < 16; Idx++)
        RevMask.push_back(15 - Idx);
      return Builder.CreateShuffleVector(LD, LD, RevMask);
    }

    llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
    llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
                                                : Intrinsic::ppc_altivec_lvsl);
    llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
    Value *HiMem = Builder.CreateGEP(
        Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1));
    Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo");
    Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
    Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1");

    Op0 = IsLE ? HiLd : LoLd;
    Op1 = IsLE ? LoLd : HiLd;
    Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
    Constant *Zero =
        llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());

    if (IsLE) {
      SmallVector<int, 16> Consts;
      for (int Idx = 0; Idx < 16; Idx++) {
        int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
                                            : 16 - (NumBytes - Idx);
        Consts.push_back(Val);
      }
      return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
                                         Zero, Consts);
    }
    SmallVector<Constant *, 16> Consts;
    for (int Idx = 0; Idx < 16; Idx++)
      Consts.push_back(Builder.getInt8(NumBytes + Idx));
    Value *Mask2 = ConstantVector::get(Consts);
    return Builder.CreateBitCast(
        Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
  }
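  // Illustrative sketch (not from the original source) of the partial-load
  // path above for NumBytes == 7 on LE: lvx loads the 16-byte block at Op0
  // ("ld.lo") and the block containing Op0 + 6 ("ld.hi"), lvsr derives the
  // permute mask from the address, vperm merges the two loads, and the final
  // shuffle moves the 7 requested bytes into place in the result vector.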
  case PPC::BI__builtin_vsx_strmb: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
    bool IsLE = getTarget().isLittleEndian();
    auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
      // Storing the whole vector: store it directly on BE; on LE, reverse the
      // bytes first and then store.
      if (Width == 16) {
        Value *StVec = Op2;
        if (IsLE) {
          SmallVector<int, 16> RevMask;
          for (int Idx = 0; Idx < 16; Idx++)
            RevMask.push_back(15 - Idx);
          StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
        }
        return Builder.CreateStore(
            StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1)));
      }
      auto *ConvTy = Int64Ty;
      unsigned NumElts = 0;
      switch (Width) {
      default:
        llvm_unreachable("width for stores must be a power of 2");
      case 8:
        ConvTy = Int64Ty;
        NumElts = 2;
        break;
      case 4:
        ConvTy = Int32Ty;
        NumElts = 4;
        break;
      case 2:
        ConvTy = Int16Ty;
        NumElts = 8;
        break;
      case 1:
        ConvTy = Int8Ty;
        NumElts = 16;
        break;
      }
      Value *Vec = Builder.CreateBitCast(
          Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
      Value *Ptr =
          Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
      Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
      if (IsLE && Width > 1) {
        Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
        Elt = Builder.CreateCall(F, Elt);
      }
      return Builder.CreateStore(
          Elt, Address(Ptr, ConvTy, CharUnits::fromQuantity(1)));
    };
    unsigned Stored = 0;
    unsigned RemainingBytes = NumBytes;
    Value *Result;
    if (NumBytes == 16)
      return StoreSubVec(16, 0, 0);
    if (NumBytes >= 8) {
      Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
      RemainingBytes -= 8;
      Stored += 8;
    }
    if (RemainingBytes >= 4) {
      Result = StoreSubVec(4, NumBytes - Stored - 4,
                           IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
      RemainingBytes -= 4;
      Stored += 4;
    }
    if (RemainingBytes >= 2) {
      Result = StoreSubVec(2, NumBytes - Stored - 2,
                           IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
      RemainingBytes -= 2;
      Stored += 2;
    }
    if (RemainingBytes)
      Result =
          StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
    return Result;
  }
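  // Worked example (illustrative only) of the store decomposition above for
  // NumBytes == 7 on LE: StoreSubVec(4, /*Offset=*/3, /*EltNo=*/0), then
  // StoreSubVec(2, /*Offset=*/1, /*EltNo=*/2), then
  // StoreSubVec(1, /*Offset=*/0, /*EltNo=*/6), i.e. a 4-, 2- and 1-byte store
  // together covering the 7 requested bytes.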
  case PPC::BI__builtin_vsx_xvsqrtsp:
  case PPC::BI__builtin_vsx_xvsqrtdp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    if (Builder.getIsFPConstrained()) {
      llvm::Function *F = CGM.getIntrinsic(
          Intrinsic::experimental_constrained_sqrt, ResultType);
      return Builder.CreateConstrainedFPCall(F, X);
    } else {
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
      return Builder.CreateCall(F, X);
    }
  }
  // Count leading zeros
  case PPC::BI__builtin_altivec_vclzb:
  case PPC::BI__builtin_altivec_vclzh:
  case PPC::BI__builtin_altivec_vclzw:
  case PPC::BI__builtin_altivec_vclzd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
  case PPC::BI__builtin_altivec_vctzb:
  case PPC::BI__builtin_altivec_vctzh:
  case PPC::BI__builtin_altivec_vctzw:
  case PPC::BI__builtin_altivec_vctzd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
  case PPC::BI__builtin_altivec_vinsd:
  case PPC::BI__builtin_altivec_vinsw:
  case PPC::BI__builtin_altivec_vinsd_elt:
  case PPC::BI__builtin_altivec_vinsw_elt: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));

    bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
                        BuiltinID == PPC::BI__builtin_altivec_vinsd);

    bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
                    BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);

    // The third argument must be a compile time constant.
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
    assert(ArgCI &&
           "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");

    // Valid value for the third argument is dependent on the input type and
    // builtin called.
    int ValidMaxValue = 0;
    if (IsUnaligned)
      ValidMaxValue = (Is32bit) ? 12 : 8;
    else
      ValidMaxValue = (Is32bit) ? 3 : 1;

    // Get value of third argument.
    int64_t ConstArg = ArgCI->getSExtValue();

    // Compose range checking error message.
    std::string RangeErrMsg = IsUnaligned ? "byte" : "element";
    RangeErrMsg += " number " + llvm::to_string(ConstArg);
    RangeErrMsg += " is outside of the valid range [0, ";
    RangeErrMsg += llvm::to_string(ValidMaxValue) + "]";

    // Issue error if third argument is not within the valid range.
    if (ConstArg < 0 || ConstArg > ValidMaxValue)
      CGM.Error(E->getExprLoc(), RangeErrMsg);

    // Input to vec_replace_elt is an element index, convert to byte index.
    if (!IsUnaligned) {
      ConstArg *= Is32bit ? 4 : 8;
      // Fix the constant according to endianness.
      if (getTarget().isLittleEndian())
        ConstArg = (Is32bit ? 12 : 8) - ConstArg;
    }

    ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
    Op2 = ConstantInt::getSigned(Int32Ty, ConstArg);
    // Casting input to vector int as per intrinsic definition.
    Op0 =
        Is32bit
            ? Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4))
            : Builder.CreateBitCast(Op0,
                                    llvm::FixedVectorType::get(Int64Ty, 2));
    return Builder.CreateBitCast(
        Builder.CreateCall(CGM.getIntrinsic(ID), {Op0, Op1, Op2}), ResultType);
  }
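  // Worked example (illustrative only) of the element-to-byte conversion
  // above: vec_replace_elt on a vector of four 32-bit elements with element
  // index 2 gives ConstArg = 2 * 4 = 8, which on little endian becomes
  // 12 - 8 = 4, the byte number passed to ppc_altivec_vinsw.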
  case PPC::BI__builtin_altivec_vpopcntb:
  case PPC::BI__builtin_altivec_vpopcnth:
  case PPC::BI__builtin_altivec_vpopcntw:
  case PPC::BI__builtin_altivec_vpopcntd: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, X);
  }
  case PPC::BI__builtin_altivec_vadduqm:
  case PPC::BI__builtin_altivec_vsubuqm: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
    Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
    Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
    if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
      return Builder.CreateAdd(Op0, Op1, "vadduqm");
    else
      return Builder.CreateSub(Op0, Op1, "vsubuqm");
  }
  case PPC::BI__builtin_altivec_vaddcuq_c:
  case PPC::BI__builtin_altivec_vsubcuq_c: {
    SmallVector<Value *, 2> Ops;
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
        llvm::IntegerType::get(getLLVMContext(), 128), 1);
    Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
    Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
    ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
             ? Intrinsic::ppc_altivec_vaddcuq
             : Intrinsic::ppc_altivec_vsubcuq;
    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
  }
  case PPC::BI__builtin_altivec_vaddeuqm_c:
  case PPC::BI__builtin_altivec_vaddecuq_c:
  case PPC::BI__builtin_altivec_vsubeuqm_c:
  case PPC::BI__builtin_altivec_vsubecuq_c: {
    SmallVector<Value *, 3> Ops;
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
        llvm::IntegerType::get(getLLVMContext(), 128), 1);
    Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
    Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
    Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
    switch (BuiltinID) {
    default:
      llvm_unreachable("Unsupported intrinsic!");
    case PPC::BI__builtin_altivec_vaddeuqm_c:
      ID = Intrinsic::ppc_altivec_vaddeuqm;
      break;
    case PPC::BI__builtin_altivec_vaddecuq_c:
      ID = Intrinsic::ppc_altivec_vaddecuq;
      break;
    case PPC::BI__builtin_altivec_vsubeuqm_c:
      ID = Intrinsic::ppc_altivec_vsubeuqm;
      break;
    case PPC::BI__builtin_altivec_vsubecuq_c:
      ID = Intrinsic::ppc_altivec_vsubecuq;
      break;
    }
    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
  }
  // Rotate and insert under mask operation.
  // __rldimi(rs, is, shift, mask)
  // (rotl64(rs, shift) & mask) | (is & ~mask)
  // __rlwimi(rs, is, shift, mask)
  // (rotl(rs, shift) & mask) | (is & ~mask)
  case PPC::BI__builtin_ppc_rldimi:
  case PPC::BI__builtin_ppc_rlwimi: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    llvm::Type *Ty = Op0->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
    if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
      Op2 = Builder.CreateZExt(Op2, Int64Ty);
    Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
    Value *X = Builder.CreateAnd(Shift, Op3);
    Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
    return Builder.CreateOr(X, Y);
  }
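  // Illustrative example of the lowering above (not from the original
  // source): __rldimi(rs, is, 8, 0xFF00) becomes
  //   (fshl(rs, rs, 8) & 0xFF00) | (is & ~0xFF00)
  // i.e. rs rotated left by 8 spliced into is under the mask.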
  // Rotate and insert under mask operation.
  // __rlwnm(rs, shift, mask)
  // rotl(rs, shift) & mask
  case PPC::BI__builtin_ppc_rlwnm: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    llvm::Type *Ty = Op0->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
    Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
    return Builder.CreateAnd(Shift, Op2);
  }
  case PPC::BI__builtin_ppc_poppar4:
  case PPC::BI__builtin_ppc_poppar8: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Op0->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
    Value *Tmp = Builder.CreateCall(F, Op0);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return Result;
  }
  case PPC::BI__builtin_ppc_cmpb: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    if (getTarget().getTriple().isPPC64()) {
      Function *F =
          CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
      return Builder.CreateCall(F, {Op0, Op1}, "cmpb");
    }
    // For 32 bit, emit the code as below:
    // %conv = trunc i64 %a to i32
    // %conv1 = trunc i64 %b to i32
    // %shr = lshr i64 %a, 32
    // %conv2 = trunc i64 %shr to i32
    // %shr3 = lshr i64 %b, 32
    // %conv4 = trunc i64 %shr3 to i32
    // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
    // %conv5 = zext i32 %0 to i64
    // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
    // %conv614 = zext i32 %1 to i64
    // %shl = shl nuw i64 %conv614, 32
    // %or = or i64 %shl, %conv5
    // ret i64 %or
    Function *F =
        CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
    Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty);
    Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty);
    Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
    Value *ArgOneHi =
        Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty);
    Value *ArgTwoHi =
        Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty);
    Value *ResLo = Builder.CreateZExt(
        Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
    Value *ResHiShift = Builder.CreateZExt(
        Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
    Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
    return Builder.CreateOr(ResLo, ResHi);
  }
  case PPC::BI__builtin_vsx_xvcpsgnsp:
  case PPC::BI__builtin_vsx_xvcpsgndp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    ID = Intrinsic::copysign;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.CreateCall(F, {X, Y});
  }
  // Rounding/truncation
  case PPC::BI__builtin_vsx_xvrspip:
  case PPC::BI__builtin_vsx_xvrdpip:
  case PPC::BI__builtin_vsx_xvrdpim:
  case PPC::BI__builtin_vsx_xvrspim:
  case PPC::BI__builtin_vsx_xvrdpi:
  case PPC::BI__builtin_vsx_xvrspi:
  case PPC::BI__builtin_vsx_xvrdpic:
  case PPC::BI__builtin_vsx_xvrspic:
  case PPC::BI__builtin_vsx_xvrdpiz:
  case PPC::BI__builtin_vsx_xvrspiz: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
        BuiltinID == PPC::BI__builtin_vsx_xvrspim)
      ID = Builder.getIsFPConstrained()
               ? Intrinsic::experimental_constrained_floor
               : Intrinsic::floor;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspi)
      ID = Builder.getIsFPConstrained()
               ? Intrinsic::experimental_constrained_round
               : Intrinsic::round;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspic)
      ID = Builder.getIsFPConstrained()
               ? Intrinsic::experimental_constrained_rint
               : Intrinsic::rint;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspip)
      ID = Builder.getIsFPConstrained()
               ? Intrinsic::experimental_constrained_ceil
               : Intrinsic::ceil;
    else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
             BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
      ID = Builder.getIsFPConstrained()
               ? Intrinsic::experimental_constrained_trunc
               : Intrinsic::trunc;
    llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
    return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
                                        : Builder.CreateCall(F, X);
  }
  case PPC::BI__builtin_vsx_xvabsdp:
  case PPC::BI__builtin_vsx_xvabssp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateCall(F, X);
  }
  // Fastmath by default
  case PPC::BI__builtin_ppc_recipdivf:
  case PPC::BI__builtin_ppc_recipdivd:
  case PPC::BI__builtin_ppc_rsqrtf:
  case PPC::BI__builtin_ppc_rsqrtd: {
    FastMathFlags FMF = Builder.getFastMathFlags();
    Builder.getFastMathFlags().setFast();
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));

    if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
        BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
      Value *Y = EmitScalarExpr(E->getArg(1));
      Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
      Builder.getFastMathFlags() &= (FMF);
      return FDiv;
    }
    auto *One = ConstantFP::get(ResultType, 1.0);
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
    Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
    Builder.getFastMathFlags() &= (FMF);
    return FDiv;
  }
  case PPC::BI__builtin_ppc_alignx: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
    if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
      AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
                                     llvm::Value::MaximumAlignment);

    emitAlignmentAssumption(Op1, E->getArg(1),
                            /*The expr loc is sufficient.*/ SourceLocation(),
                            AlignmentCI, nullptr);
    return Op1;
  }
  case PPC::BI__builtin_ppc_rdlam: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    llvm::Type *Ty = Op0->getType();
    Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false);
    Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
    Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
    return Builder.CreateAnd(Rotate, Op2);
  }
  case PPC::BI__builtin_ppc_load2r: {
    Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *LoadIntrinsic = Builder.CreateCall(F, {Op0});
    return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
  }
  case PPC::BI__builtin_ppc_fnmsub:
  case PPC::BI__builtin_ppc_fnmsubs:
  case PPC::BI__builtin_vsx_xvmaddadp:
  case PPC::BI__builtin_vsx_xvmaddasp:
  case PPC::BI__builtin_vsx_xvnmaddadp:
  case PPC::BI__builtin_vsx_xvnmaddasp:
  case PPC::BI__builtin_vsx_xvmsubadp:
  case PPC::BI__builtin_vsx_xvmsubasp:
  case PPC::BI__builtin_vsx_xvnmsubadp:
  case PPC::BI__builtin_vsx_xvnmsubasp: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    llvm::Function *F;
    if (Builder.getIsFPConstrained())
      F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
    else
      F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
    switch (BuiltinID) {
      case PPC::BI__builtin_vsx_xvmaddadp:
      case PPC::BI__builtin_vsx_xvmaddasp:
        if (Builder.getIsFPConstrained())
          return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
        else
          return Builder.CreateCall(F, {X, Y, Z});
      case PPC::BI__builtin_vsx_xvnmaddadp:
      case PPC::BI__builtin_vsx_xvnmaddasp:
        if (Builder.getIsFPConstrained())
          return Builder.CreateFNeg(
              Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
        else
          return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
      case PPC::BI__builtin_vsx_xvmsubadp:
      case PPC::BI__builtin_vsx_xvmsubasp:
        if (Builder.getIsFPConstrained())
          return Builder.CreateConstrainedFPCall(
              F, {X, Y, Builder.CreateFNeg(Z, "neg")});
        else
          return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
      case PPC::BI__builtin_ppc_fnmsub:
      case PPC::BI__builtin_ppc_fnmsubs:
      case PPC::BI__builtin_vsx_xvnmsubadp:
      case PPC::BI__builtin_vsx_xvnmsubasp:
        if (Builder.getIsFPConstrained())
          return Builder.CreateFNeg(
              Builder.CreateConstrainedFPCall(
                  F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
              "neg");
        else
          return Builder.CreateCall(
              CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
    }
    llvm_unreachable("Unknown FMA operation");
    return nullptr; // Suppress no-return warning
  }
  case PPC::BI__builtin_vsx_insertword: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);

    // Third argument is a compile time constant int. It must be clamped to
    // the range [0, 12].
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
    assert(ArgCI &&
           "Third arg to xxinsertw intrinsic must be constant integer");
    const int64_t MaxIndex = 12;
    int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);

    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument, and inserts it in the second argument. The
    // instruction extracts the word from its second input register and inserts
    // it into its first input register, so swap the first and second arguments.
    std::swap(Op0, Op1);

    // Need to cast the second argument from a vector of unsigned int to a
    // vector of long long.
    Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));

    if (getTarget().isLittleEndian()) {
      // Reverse the double words in the vector we will extract from.
      Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
      Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});

      // Reverse the index.
      Index = MaxIndex - Index;
    }

    // Intrinsic expects the first arg to be a vector of int.
    Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
    Op2 = ConstantInt::getSigned(Int32Ty, Index);
    return Builder.CreateCall(F, {Op0, Op1, Op2});
  }
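  // Worked example (illustrative only) of the endian adjustment above: with a
  // compile-time index of 8 on little endian, the doublewords of the source
  // vector are swapped and the index becomes 12 - 8 = 4 before calling
  // ppc_vsx_xxinsertw.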
  case PPC::BI__builtin_vsx_extractuword: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);

    // Intrinsic expects the first argument to be a vector of doublewords.
    Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));

    // The second argument is a compile time constant int that needs to
    // be clamped to the range [0, 12].
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
    assert(ArgCI &&
           "Second Arg to xxextractuw intrinsic must be a constant integer!");
    const int64_t MaxIndex = 12;
    int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);

    if (getTarget().isLittleEndian()) {
      // Reverse the index.
      Index = MaxIndex - Index;
      Op1 = ConstantInt::getSigned(Int32Ty, Index);

      // Emit the call, then reverse the double words of the results vector.
      Value *Call = Builder.CreateCall(F, {Op0, Op1});

      Value *ShuffleCall =
          Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
      return ShuffleCall;
    } else {
      Op1 = ConstantInt::getSigned(Int32Ty, Index);
      return Builder.CreateCall(F, {Op0, Op1});
    }
  }
  case PPC::BI__builtin_vsx_xxpermdi: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
    assert(ArgCI && "Third arg must be constant integer!");

    unsigned Index = ArgCI->getZExtValue();
    Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
    Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));

    // Account for endianness by treating this as just a shuffle. So we use the
    // same indices for both LE and BE in order to produce expected results in
    // both cases.
    int ElemIdx0 = (Index & 2) >> 1;
    int ElemIdx1 = 2 + (Index & 1);

    int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
    Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
    QualType BIRetType = E->getType();
    auto RetTy = ConvertType(BIRetType);
    return Builder.CreateBitCast(ShuffleCall, RetTy);
  }
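  // Worked example (illustrative only) of the mask computation above: for
  // Index == 3, ElemIdx0 = (3 & 2) >> 1 = 1 and ElemIdx1 = 2 + (3 & 1) = 3,
  // so the shuffle selects doubleword 1 of Op0 and doubleword 1 of Op1 on
  // both endiannesses.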
  case PPC::BI__builtin_vsx_xxsldwi: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
    assert(ArgCI && "Third argument must be a compile time constant");
    unsigned Index = ArgCI->getZExtValue() & 0x3;
    Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
    Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4));

    // Create a shuffle mask
    int ElemIdx0;
    int ElemIdx1;
    int ElemIdx2;
    int ElemIdx3;
    if (getTarget().isLittleEndian()) {
      // Little endian element N comes from element 8+N-Index of the
      // concatenated wide vector (of course, using modulo arithmetic on
      // the total number of elements).
      ElemIdx0 = (8 - Index) % 8;
      ElemIdx1 = (9 - Index) % 8;
      ElemIdx2 = (10 - Index) % 8;
      ElemIdx3 = (11 - Index) % 8;
    } else {
      // Big endian ElemIdx<N> = Index + N
      ElemIdx0 = Index;
      ElemIdx1 = Index + 1;
      ElemIdx2 = Index + 2;
      ElemIdx3 = Index + 3;
    }

    int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
    Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
    QualType BIRetType = E->getType();
    auto RetTy = ConvertType(BIRetType);
    return Builder.CreateBitCast(ShuffleCall, RetTy);
  }
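  // Worked example (illustrative only) of the mask computation above: for
  // Index == 1 the big-endian mask is {1, 2, 3, 4}, while the little-endian
  // mask is {(8-1)%8, (9-1)%8, (10-1)%8, (11-1)%8} = {7, 0, 1, 2}.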
  case PPC::BI__builtin_pack_vector_int128: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    bool isLittleEndian = getTarget().isLittleEndian();
    Value *PoisonValue =
        llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->getType(), 2));
    Value *Res = Builder.CreateInsertElement(
        PoisonValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
    Res = Builder.CreateInsertElement(Res, Op1,
                                      (uint64_t)(isLittleEndian ? 0 : 1));
    return Builder.CreateBitCast(Res, ConvertType(E->getType()));
  }
  case PPC::BI__builtin_unpack_vector_int128: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    ConstantInt *Index = cast<ConstantInt>(Op1);
    Value *Unpacked = Builder.CreateBitCast(
        Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2));

    if (getTarget().isLittleEndian())
      Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());

    return Builder.CreateExtractElement(Unpacked, Index);
  }
  case PPC::BI__builtin_ppc_sthcx: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty);
    return Builder.CreateCall(F, {Op0, Op1});
  }
  // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
  // Some of the MMA instructions accumulate their result into an existing
  // accumulator whereas the others generate a new accumulator. So we need to
  // use custom code generation to expand a builtin call with a pointer to a
  // load (if the corresponding instruction accumulates its result) followed by
  // the call to the intrinsic and a store of the result.
#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
  case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
  {
    SmallVector<Value *, 4> Ops;
    for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
      if (E->getArg(i)->getType()->isArrayType())
        Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
      else
        Ops.push_back(EmitScalarExpr(E->getArg(i)));
    // The first argument of these builtins is a pointer used to store their
    // result. However, the llvm intrinsics return their result in multiple
    // return values. So, here we emit code extracting these values from the
    // intrinsic results and storing them using that pointer.
    if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
        BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
        BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
      unsigned NumVecs = 2;
      auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
      if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
        NumVecs = 4;
        Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
      }
      llvm::Function *F = CGM.getIntrinsic(Intrinsic);
      Address Addr = EmitPointerWithAlignment(E->getArg(1));
      Value *Vec = Builder.CreateLoad(Addr);
      Value *Call = Builder.CreateCall(F, {Vec});
      llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
      Value *Ptr = Ops[0];
      for (unsigned i=0; i<NumVecs; i++) {
        Value *Vec = Builder.CreateExtractValue(Call, i);
        llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
        Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
        Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
      }
      return Call;
    }
    if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
        BuiltinID == PPC::BI__builtin_mma_build_acc) {
      // Reverse the order of the operands for LE, so the
      // same builtin call can be used on both LE and BE
      // without the need for the programmer to swap operands.
      // The operands are reversed starting from the second argument,
      // the first operand is the pointer to the pair/accumulator
      // that is being built.
      if (getTarget().isLittleEndian())
        std::reverse(Ops.begin() + 1, Ops.end());
    }
    bool Accumulate;
    switch (BuiltinID) {
  #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
    case PPC::BI__builtin_##Name: \
      ID = Intrinsic::ppc_##Intr; \
      Accumulate = Acc; \
      break;
  #include "clang/Basic/BuiltinsPPC.def"
    }
    if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
        BuiltinID == PPC::BI__builtin_vsx_stxvp ||
        BuiltinID == PPC::BI__builtin_mma_lxvp ||
        BuiltinID == PPC::BI__builtin_mma_stxvp) {
      if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
          BuiltinID == PPC::BI__builtin_mma_lxvp) {
        Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
      } else {
        Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
      }
      Ops.pop_back();
      llvm::Function *F = CGM.getIntrinsic(ID);
      return Builder.CreateCall(F, Ops, "");
    }
    SmallVector<Value*, 4> CallOps;
    if (Accumulate) {
      Address Addr = EmitPointerWithAlignment(E->getArg(0));
      Value *Acc = Builder.CreateLoad(Addr);
      CallOps.push_back(Acc);
    }
    for (unsigned i=1; i<Ops.size(); i++)
      CallOps.push_back(Ops[i]);
    llvm::Function *F = CGM.getIntrinsic(ID);
    Value *Call = Builder.CreateCall(F, CallOps);
    return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
  }
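  // Rough shape of the expansion above for an accumulating MMA builtin
  // (illustrative only): load the accumulator through the pointer operand,
  // call the corresponding ppc_mma_* intrinsic with the loaded accumulator
  // plus the remaining operands, and store the returned accumulator back
  // through the same pointer with 64-byte alignment.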
  case PPC::BI__builtin_ppc_compare_and_swap:
  case PPC::BI__builtin_ppc_compare_and_swaplp: {
    Address Addr = EmitPointerWithAlignment(E->getArg(0));
    Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
    Value *OldVal = Builder.CreateLoad(OldValAddr);
    QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
    LValue LV = MakeAddrLValue(Addr, AtomicTy);
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    auto Pair = EmitAtomicCompareExchange(
        LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(),
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
    // Unlike c11's atomic_compare_exchange, according to
    // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
    // > In either case, the contents of the memory location specified by addr
    // > are copied into the memory location specified by old_val_addr.
    // But it hasn't specified storing to OldValAddr is atomic or not and
    // which order to use. Now following XL's codegen, treat it as a normal
    // store.
    Value *LoadedVal = Pair.first.getScalarVal();
    Builder.CreateStore(LoadedVal, OldValAddr);
    return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
  }
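  // Sketch of the expansion above (illustrative only): a monotonic cmpxchg on
  // the target location, an ordinary store of the loaded value back to
  // old_val_addr, and a zext of the success bit to i32 as the result.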
  case PPC::BI__builtin_ppc_fetch_and_add:
  case PPC::BI__builtin_ppc_fetch_and_addlp: {
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 llvm::AtomicOrdering::Monotonic);
  }
  case PPC::BI__builtin_ppc_fetch_and_and:
  case PPC::BI__builtin_ppc_fetch_and_andlp: {
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 llvm::AtomicOrdering::Monotonic);
  }
  case PPC::BI__builtin_ppc_fetch_and_or:
  case PPC::BI__builtin_ppc_fetch_and_orlp: {
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 llvm::AtomicOrdering::Monotonic);
  }
  case PPC::BI__builtin_ppc_fetch_and_swap:
  case PPC::BI__builtin_ppc_fetch_and_swaplp: {
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 llvm::AtomicOrdering::Monotonic);
  }
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_lwarx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
  case PPC::BI__builtin_ppc_mfspr: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
                              ? Int32Ty
                              : Int64Ty;
    Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
    return Builder.CreateCall(F, {Op0});
  }
  case PPC::BI__builtin_ppc_mtspr: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
                              ? Int32Ty
                              : Int64Ty;
    Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
    return Builder.CreateCall(F, {Op0, Op1});
  }
  case PPC::BI__builtin_ppc_popcntb: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
    return Builder.CreateCall(F, {ArgValue}, "popcntb");
  }
  case PPC::BI__builtin_ppc_mtfsf: {
    // The builtin takes a uint32 that needs to be cast to an
    // f64 to be passed to the intrinsic.
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy);
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
    return Builder.CreateCall(F, {Op0, Cast}, "");
  }
  case PPC::BI__builtin_ppc_swdiv_nochk:
  case PPC::BI__builtin_ppc_swdivs_nochk: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    FastMathFlags FMF = Builder.getFastMathFlags();
    Builder.getFastMathFlags().setFast();
    Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk");
    Builder.getFastMathFlags() &= (FMF);
    return FDiv;
  }
  case PPC::BI__builtin_ppc_fric:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::rint,
                           Intrinsic::experimental_constrained_rint))
        .getScalarVal();
  case PPC::BI__builtin_ppc_frim:
  case PPC::BI__builtin_ppc_frims:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::floor,
                           Intrinsic::experimental_constrained_floor))
        .getScalarVal();
  case PPC::BI__builtin_ppc_frin:
  case PPC::BI__builtin_ppc_frins:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::round,
                           Intrinsic::experimental_constrained_round))
        .getScalarVal();
  case PPC::BI__builtin_ppc_frip:
  case PPC::BI__builtin_ppc_frips:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::ceil,
                           Intrinsic::experimental_constrained_ceil))
        .getScalarVal();
  case PPC::BI__builtin_ppc_friz:
  case PPC::BI__builtin_ppc_frizs:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::trunc,
                           Intrinsic::experimental_constrained_trunc))
        .getScalarVal();
  case PPC::BI__builtin_ppc_fsqrt:
  case PPC::BI__builtin_ppc_fsqrts:
    return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
                           *this, E, Intrinsic::sqrt,
                           Intrinsic::experimental_constrained_sqrt))
        .getScalarVal();
  case PPC::BI__builtin_ppc_test_data_class: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::ppc_test_data_class, Op0->getType()),
        {Op0, Op1}, "test_data_class");
  }
  case PPC::BI__builtin_ppc_maxfe: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_maxfl: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_maxfs: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_minfe: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_minfl: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_minfs: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    Value *Op2 = EmitScalarExpr(E->getArg(2));
    Value *Op3 = EmitScalarExpr(E->getArg(3));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs),
                              {Op0, Op1, Op2, Op3});
  }
  case PPC::BI__builtin_ppc_swdiv:
  case PPC::BI__builtin_ppc_swdivs: {
    Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Op1 = EmitScalarExpr(E->getArg(1));
    return Builder.CreateFDiv(Op0, Op1, "swdiv");
  }
  case PPC::BI__builtin_ppc_set_fpscr_rn:
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_setrnd),
                              {EmitScalarExpr(E->getArg(0))});
  case PPC::BI__builtin_ppc_mffs:
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_readflm));
  }
}

namespace {
// If \p E is not null pointer, insert address space cast to match return
// type of \p E if necessary.
Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
                             const CallExpr *E = nullptr) {
  auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
  auto *Call = CGF.Builder.CreateCall(F);
  Call->addRetAttr(
      Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
  Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
  if (!E)
    return Call;
  QualType BuiltinRetType = E->getType();
  auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
  if (RetTy == Call->getType())
    return Call;
  return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
}
Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
  auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
  auto *Call = CGF.Builder.CreateCall(F);
  Call->addRetAttr(
      Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
  Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
  return Call;
}
// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
/// Emit code based on Code Object ABI version.
/// COV_4    : Emit code to use dispatch ptr
/// COV_5    : Emit code to use implicitarg ptr
/// COV_NONE : Emit code to load a global variable "llvm.amdgcn.abi.version"
///            and use its value for COV_4 or COV_5 approach. It is used for
///            compiling device libraries in an ABI-agnostic way.
///
/// Note: "llvm.amdgcn.abi.version" is supposed to be emitted and initialized
///       by clang during compilation of user code.
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
  llvm::LoadInst *LD;

  auto Cov = CGF.getTarget().getTargetOpts().CodeObjectVersion;

  if (Cov == clang::TargetOptions::COV_None) {
    StringRef Name = "llvm.amdgcn.abi.version";
    auto *ABIVersionC = CGF.CGM.getModule().getNamedGlobal(Name);
    if (!ABIVersionC)
      ABIVersionC = new llvm::GlobalVariable(
          CGF.CGM.getModule(), CGF.Int32Ty, false,
          llvm::GlobalValue::ExternalLinkage, nullptr, Name, nullptr,
          llvm::GlobalVariable::NotThreadLocal,
          CGF.CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));

    // This load will be eliminated by the IPSCCP because it is constant
    // weak_odr without externally_initialized. Either changing it to weak or
    // adding externally_initialized will keep the load.
    Value *ABIVersion = CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, ABIVersionC,
                                                      CGF.CGM.getIntAlign());

    Value *IsCOV5 = CGF.Builder.CreateICmpSGE(
        ABIVersion,
        llvm::ConstantInt::get(CGF.Int32Ty, clang::TargetOptions::COV_5));

    // Indexing the implicit kernarg segment.
    Value *ImplicitGEP = CGF.Builder.CreateConstGEP1_32(
        CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);

    // Indexing the HSA kernel_dispatch_packet struct.
    Value *DispatchGEP = CGF.Builder.CreateConstGEP1_32(
        CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);

    auto Result = CGF.Builder.CreateSelect(IsCOV5, ImplicitGEP, DispatchGEP);
    LD = CGF.Builder.CreateLoad(
        Address(Result, CGF.Int16Ty, CharUnits::fromQuantity(2)));
  } else {
    Value *GEP = nullptr;
    if (Cov == clang::TargetOptions::COV_5) {
      // Indexing the implicit kernarg segment.
      GEP = CGF.Builder.CreateConstGEP1_32(
          CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
    } else {
      // Indexing the HSA kernel_dispatch_packet struct.
      GEP = CGF.Builder.CreateConstGEP1_32(
          CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);
    }
    LD = CGF.Builder.CreateLoad(
        Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
  }

  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
      APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
  LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
  LD->setMetadata(llvm::LLVMContext::MD_noundef,
                  llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
  LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
                  llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
  return LD;
}
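// Worked example (illustrative only) of the offsets above: for Index == 1
// (the y dimension), the COV_5 path loads an i16 at implicitarg_ptr + 14
// (12 + 1 * 2) and the COV_4 path loads an i16 at dispatch_ptr + 6
// (4 + 1 * 2); COV_NONE selects between the two addresses at run time based
// on the value of "llvm.amdgcn.abi.version".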
// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
  const unsigned XOffset = 12;
  auto *DP = EmitAMDGPUDispatchPtr(CGF);
  // Indexing the HSA kernel_dispatch_packet struct.
  auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
  auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
  auto *LD = CGF.Builder.CreateLoad(
      Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
  LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
                  llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
  return LD;
}
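// Worked example (illustrative only) of the offset above: for Index == 2
// (the z dimension), the load reads an i32 at dispatch_ptr + 12 + 2 * 4 = 20,
// which corresponds to the grid_size_z field of the HSA
// kernel_dispatch_packet.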
} // namespace

// For processing memory ordering and memory scope arguments of various
// amdgcn builtins.
// \p Order takes a C++11 compatible memory-ordering specifier and converts
// it into LLVM's memory ordering specifier using atomic C ABI, and writes
// to \p AO. \p Scope takes a const char * and converts it into AMDGCN
// specific SyncScopeID and writes it to \p SSID.
void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
                                              llvm::AtomicOrdering &AO,
                                              llvm::SyncScope::ID &SSID) {
  int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();

  // Map C11/C++11 memory ordering to LLVM memory ordering
  assert(llvm::isValidAtomicOrderingCABI(ord));
  switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::consume:
    AO = llvm::AtomicOrdering::Acquire;
    break;
  case llvm::AtomicOrderingCABI::release:
    AO = llvm::AtomicOrdering::Release;
    break;
  case llvm::AtomicOrderingCABI::acq_rel:
    AO = llvm::AtomicOrdering::AcquireRelease;
    break;
  case llvm::AtomicOrderingCABI::seq_cst:
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
    break;
  case llvm::AtomicOrderingCABI::relaxed:
    AO = llvm::AtomicOrdering::Monotonic;
    break;
  }

  StringRef scp;
  llvm::getConstantStringInfo(Scope, scp);
  SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
}
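// Illustrative example (not from the original source): a builtin call that
// passes __ATOMIC_ACQUIRE as the ordering and the scope string "workgroup"
// yields llvm::AtomicOrdering::Acquire in AO and the "workgroup" sync scope
// ID in SSID.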
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
  llvm::SyncScope::ID SSID;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_div_scale:
  case AMDGPU::BI__builtin_amdgcn_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
    // argument.

    Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));

    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Z = EmitScalarExpr(E->getArg(2));

    llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
                                              X->getType());

    llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});

    llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
    llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);

    llvm::Type *RealFlagType = FlagOutPtr.getElementType();

    llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
    Builder.CreateStore(FlagExt, FlagOutPtr);
    return Result;
  }
:
17616 case AMDGPU::BI__builtin_amdgcn_div_fmasf
: {
17617 llvm::Value
*Src0
= EmitScalarExpr(E
->getArg(0));
17618 llvm::Value
*Src1
= EmitScalarExpr(E
->getArg(1));
17619 llvm::Value
*Src2
= EmitScalarExpr(E
->getArg(2));
17620 llvm::Value
*Src3
= EmitScalarExpr(E
->getArg(3));
17622 llvm::Function
*F
= CGM
.getIntrinsic(Intrinsic::amdgcn_div_fmas
,
17624 llvm::Value
*Src3ToBool
= Builder
.CreateIsNotNull(Src3
);
17625 return Builder
.CreateCall(F
, {Src0
, Src1
, Src2
, Src3ToBool
});
  case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
    return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
    return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp:
  case AMDGPU::BI__builtin_amdgcn_update_dpp: {
    llvm::SmallVector<llvm::Value *, 6> Args;
    for (unsigned I = 0; I != E->getNumArgs(); ++I)
      Args.push_back(EmitScalarExpr(E->getArg(I)));
    assert(Args.size() == 5 || Args.size() == 6);
    if (Args.size() == 5)
      Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
    Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
    return Builder.CreateCall(F, Args);
  }
  case AMDGPU::BI__builtin_amdgcn_div_fixup:
  case AMDGPU::BI__builtin_amdgcn_div_fixupf:
  case AMDGPU::BI__builtin_amdgcn_div_fixuph:
    return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
  case AMDGPU::BI__builtin_amdgcn_trig_preop:
  case AMDGPU::BI__builtin_amdgcn_trig_preopf:
    return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
  case AMDGPU::BI__builtin_amdgcn_rcp:
  case AMDGPU::BI__builtin_amdgcn_rcpf:
  case AMDGPU::BI__builtin_amdgcn_rcph:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
  case AMDGPU::BI__builtin_amdgcn_sqrt:
  case AMDGPU::BI__builtin_amdgcn_sqrtf:
  case AMDGPU::BI__builtin_amdgcn_sqrth:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
  case AMDGPU::BI__builtin_amdgcn_rsq:
  case AMDGPU::BI__builtin_amdgcn_rsqf:
  case AMDGPU::BI__builtin_amdgcn_rsqh:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
  case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
  case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
  case AMDGPU::BI__builtin_amdgcn_sinf:
  case AMDGPU::BI__builtin_amdgcn_sinh:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
  case AMDGPU::BI__builtin_amdgcn_cosf:
  case AMDGPU::BI__builtin_amdgcn_cosh:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
  case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
    return EmitAMDGPUDispatchPtr(*this, E);
  case AMDGPU::BI__builtin_amdgcn_logf:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
  case AMDGPU::BI__builtin_amdgcn_exp2f:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
  case AMDGPU::BI__builtin_amdgcn_log_clampf:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
  case AMDGPU::BI__builtin_amdgcn_ldexp:
  case AMDGPU::BI__builtin_amdgcn_ldexpf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
    return Builder.CreateCall(F, {Src0, Src1});
  }
  case AMDGPU::BI__builtin_amdgcn_ldexph: {
    // The raw instruction has a different behavior for out of bounds exponent
    // values (implicit truncation instead of saturate to short_min/short_max).
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Int16Ty});
    return Builder.CreateCall(F, {Src0, Builder.CreateTrunc(Src1, Int16Ty)});
  }
  case AMDGPU::BI__builtin_amdgcn_frexp_mant:
  case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
  case AMDGPU::BI__builtin_amdgcn_frexp_manth:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
  case AMDGPU::BI__builtin_amdgcn_frexp_exp:
  case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                   { Builder.getInt32Ty(), Src0->getType() });
    return Builder.CreateCall(F, Src0);
  }
  case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                   { Builder.getInt16Ty(), Src0->getType() });
    return Builder.CreateCall(F, Src0);
  }
  case AMDGPU::BI__builtin_amdgcn_fract:
  case AMDGPU::BI__builtin_amdgcn_fractf:
  case AMDGPU::BI__builtin_amdgcn_fracth:
    return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
  case AMDGPU::BI__builtin_amdgcn_lerp:
    return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
  case AMDGPU::BI__builtin_amdgcn_ubfe:
    return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
  case AMDGPU::BI__builtin_amdgcn_sbfe:
    return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
  case AMDGPU::BI__builtin_amdgcn_ballot_w32:
  case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
    llvm::Type *ResultType = ConvertType(E->getType());
    llvm::Value *Src = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
    return Builder.CreateCall(F, { Src });
  }
  case AMDGPU::BI__builtin_amdgcn_uicmp:
  case AMDGPU::BI__builtin_amdgcn_uicmpl:
  case AMDGPU::BI__builtin_amdgcn_sicmp:
  case AMDGPU::BI__builtin_amdgcn_sicmpl: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));

    // FIXME-GFX10: How should 32 bit mask be handled?
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
                                   { Builder.getInt64Ty(), Src0->getType() });
    return Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
  case AMDGPU::BI__builtin_amdgcn_fcmp:
  case AMDGPU::BI__builtin_amdgcn_fcmpf: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));

    // FIXME-GFX10: How should 32 bit mask be handled?
    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
                                   { Builder.getInt64Ty(), Src0->getType() });
    return Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
  case AMDGPU::BI__builtin_amdgcn_class:
  case AMDGPU::BI__builtin_amdgcn_classf:
  case AMDGPU::BI__builtin_amdgcn_classh:
    return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
  case AMDGPU::BI__builtin_amdgcn_fmed3f:
  case AMDGPU::BI__builtin_amdgcn_fmed3h:
    return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
  case AMDGPU::BI__builtin_amdgcn_ds_append:
  case AMDGPU::BI__builtin_amdgcn_ds_consume: {
    Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
      Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
    Value *Src0 = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
    return Builder.CreateCall(F, { Src0, Builder.getFalse() });
  }
  case AMDGPU::BI__builtin_amdgcn_ds_faddf:
  case AMDGPU::BI__builtin_amdgcn_ds_fminf:
  case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
    Intrinsic::ID Intrin;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_ds_faddf:
      Intrin = Intrinsic::amdgcn_ds_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_fminf:
      Intrin = Intrinsic::amdgcn_ds_fmin;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
      Intrin = Intrinsic::amdgcn_ds_fmax;
      break;
    }
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
    llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
    llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
    llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
    llvm::FunctionType *FTy = F->getFunctionType();
    llvm::Type *PTy = FTy->getParamType(0);
    Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
    return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
  }
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: {
    Intrinsic::ID IID;
    llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
      ArgTy = llvm::Type::getFloatTy(getLLVMContext());
      IID = Intrinsic::amdgcn_global_atomic_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
      ArgTy = llvm::FixedVectorType::get(
          llvm::Type::getHalfTy(getLLVMContext()), 2);
      IID = Intrinsic::amdgcn_global_atomic_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
      IID = Intrinsic::amdgcn_global_atomic_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
      IID = Intrinsic::amdgcn_global_atomic_fmin;
      break;
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
      IID = Intrinsic::amdgcn_global_atomic_fmax;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
      IID = Intrinsic::amdgcn_flat_atomic_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
      IID = Intrinsic::amdgcn_flat_atomic_fmin;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
      IID = Intrinsic::amdgcn_flat_atomic_fmax;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
      ArgTy = llvm::Type::getFloatTy(getLLVMContext());
      IID = Intrinsic::amdgcn_flat_atomic_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
      ArgTy = llvm::FixedVectorType::get(
          llvm::Type::getHalfTy(getLLVMContext()), 2);
      IID = Intrinsic::amdgcn_flat_atomic_fadd;
      break;
    }
    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::Function *F =
        CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
    return Builder.CreateCall(F, {Addr, Val});
  }
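  // Illustrative mangling for one member of the group above (sketch only):
  //   __builtin_amdgcn_global_atomic_fadd_f32(p, v)
  // is expected to produce roughly
  //   call float @llvm.amdgcn.global.atomic.fadd.f32.p1.f32(ptr addrspace(1) %p, float %v)
  // since the intrinsic is overloaded on the {result, pointer, value} types
  // passed to getIntrinsic above.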
  case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
  case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
    Intrinsic::ID IID;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
      IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
      IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
      break;
    }
    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
    return Builder.CreateCall(F, {Addr, Val});
  }
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
  case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
    Intrinsic::ID IID;
    llvm::Type *ArgTy;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
      ArgTy = llvm::Type::getFloatTy(getLLVMContext());
      IID = Intrinsic::amdgcn_ds_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
      ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
      IID = Intrinsic::amdgcn_ds_fadd;
      break;
    case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
      ArgTy = llvm::FixedVectorType::get(
          llvm::Type::getHalfTy(getLLVMContext()), 2);
      IID = Intrinsic::amdgcn_ds_fadd;
      break;
    }
    llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
        llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
    llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
        llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
    llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
    return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
  }
  case AMDGPU::BI__builtin_amdgcn_read_exec:
    return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, false);
  case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
    return EmitAMDGCNBallotForExec(*this, E, Int32Ty, Int32Ty, false);
  case AMDGPU::BI__builtin_amdgcn_read_exec_hi:
    return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, true);
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
  case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
    llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
    llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
    llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
    llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
    llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
    llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));

    // The builtins take these arguments as vec4 where the last element is
    // ignored. The intrinsic takes them as vec3.
    RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
                                            ArrayRef<int>{0, 1, 2});
    RayDir =
        Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
    RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
                                                ArrayRef<int>{0, 1, 2});

    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
                                   {NodePtr->getType(), RayDir->getType()});
    return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
                                  RayInverseDir, TextureDescr});
  }
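  // Illustrative IR for the vec4 -> vec3 adjustment above (sketch only):
  //   %dir3 = shufflevector <4 x float> %dir, <4 x float> %dir,
  //                         <3 x i32> <i32 0, i32 1, i32 2>
  // i.e. the unused fourth lane of each ray vector is dropped before the call.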
  case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn: {
    SmallVector<Value *, 4> Args;
    for (int i = 0, e = E->getNumArgs(); i != e; ++i)
      Args.push_back(EmitScalarExpr(E->getArg(i)));

    Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ds_bvh_stack_rtn);
    Value *Call = Builder.CreateCall(F, Args);
    Value *Rtn = Builder.CreateExtractValue(Call, 0);
    Value *A = Builder.CreateExtractValue(Call, 1);
    llvm::Type *RetTy = ConvertType(E->getType());
    Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
                                            (uint64_t)0);
    return Builder.CreateInsertElement(I0, A, 1);
  }
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
  case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: {

    // These operations perform a matrix multiplication and accumulation of
    // the form:
    //             D = A * B + C
    // The return type always matches the type of matrix C.
    unsigned ArgForMatchingRetType;
    unsigned BuiltinWMMAOp;

    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
      ArgForMatchingRetType = 2;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
      ArgForMatchingRetType = 2;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
      ArgForMatchingRetType = 2;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
      ArgForMatchingRetType = 2;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
      ArgForMatchingRetType = 4;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
      break;
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
    case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
      ArgForMatchingRetType = 4;
      BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
      break;
    }

    SmallVector<Value *, 6> Args;
    for (int i = 0, e = E->getNumArgs(); i != e; ++i)
      Args.push_back(EmitScalarExpr(E->getArg(i)));

    Function *F = CGM.getIntrinsic(BuiltinWMMAOp,
                                   {Args[ArgForMatchingRetType]->getType()});

    return Builder.CreateCall(F, Args);
  }
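  // Illustrative lowering for one WMMA builtin (sketch only, wave32):
  //   __builtin_amdgcn_wmma_f32_16x16x16_f16_w32(a, b, c)
  // is expected to become roughly
  //   call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32(%a, %b, <8 x float> %c)
  // where the overload type comes from operand C, selected via
  // ArgForMatchingRetType above.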
  // amdgcn workitem
  case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
    return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
  case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
    return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
  case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
    return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);

  // amdgcn workgroup size
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
    return EmitAMDGPUWorkGroupSize(*this, 0);
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
    return EmitAMDGPUWorkGroupSize(*this, 1);
  case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
    return EmitAMDGPUWorkGroupSize(*this, 2);

  // amdgcn grid size
  case AMDGPU::BI__builtin_amdgcn_grid_size_x:
    return EmitAMDGPUGridSize(*this, 0);
  case AMDGPU::BI__builtin_amdgcn_grid_size_y:
    return EmitAMDGPUGridSize(*this, 1);
  case AMDGPU::BI__builtin_amdgcn_grid_size_z:
    return EmitAMDGPUGridSize(*this, 2);
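  // Note: emitRangedBuiltin is assumed to attach range metadata to the call,
  // e.g. for __builtin_amdgcn_workitem_id_x roughly
  //   %id = call i32 @llvm.amdgcn.workitem.id.x(), !range !{i32 0, i32 1024}
  // so later passes can fold comparisons against the hardware limits.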
  case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
  case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
    return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
  case AMDGPU::BI__builtin_r600_read_tidig_x:
    return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
  case AMDGPU::BI__builtin_r600_read_tidig_y:
    return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
  case AMDGPU::BI__builtin_r600_read_tidig_z:
    return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
  case AMDGPU::BI__builtin_amdgcn_alignbit: {
    llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
    llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
    llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
    Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
    return Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
  case AMDGPU::BI__builtin_amdgcn_fence: {
    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
                            EmitScalarExpr(E->getArg(1)), AO, SSID);
    return Builder.CreateFence(AO, SSID);
  }
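  // Illustrative lowering (sketch only):
  //   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup")
  // is expected to become roughly
  //   fence syncscope("workgroup") seq_cst
  // with the ordering and scope decoded by ProcessOrderScopeAMDGCN above.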
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
    llvm::AtomicRMWInst::BinOp BinOp;
    switch (BuiltinID) {
    case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
    case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
      BinOp = llvm::AtomicRMWInst::UIncWrap;
      break;
    case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
    case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
      BinOp = llvm::AtomicRMWInst::UDecWrap;
      break;
    }

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));

    ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
                            EmitScalarExpr(E->getArg(3)), AO, SSID);

    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    llvm::AtomicRMWInst *RMW =
        Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
    if (Volatile)
      RMW->setVolatile(true);
    return RMW;
  }
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
  case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResultType = ConvertType(E->getType());
    // s_sendmsg_rtn is mangled using return type only.
    Function *F =
        CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
    return Builder.CreateCall(F, {Arg});
  }
  default:
    return nullptr;
  }
}

/// Handle a SystemZ function in which the final argument is a pointer
/// to an int that receives the post-instruction CC value. At the LLVM level
/// this is represented as a function that returns a {result, cc} pair.
static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
                                         unsigned IntrinsicID,
                                         const CallExpr *E) {
  unsigned NumArgs = E->getNumArgs() - 1;
  SmallVector<Value *, 8> Args(NumArgs);
  for (unsigned I = 0; I < NumArgs; ++I)
    Args[I] = CGF.EmitScalarExpr(E->getArg(I));
  Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
  Value *Call = CGF.Builder.CreateCall(F, Args);
  Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
  CGF.Builder.CreateStore(CC, CCPtr);
  return CGF.Builder.CreateExtractValue(Call, 0);
}
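// Illustrative use of the helper above (sketch only): for a call such as
//   int cc; vec = __builtin_s390_vpkshs(a, b, &cc);
// the intrinsic's {result, cc} aggregate is split so that element 1 is stored
// through the trailing pointer argument and element 0 becomes the call value.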

Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  switch (BuiltinID) {
  case SystemZ::BI__builtin_tbegin: {
    Value *TDB = EmitScalarExpr(E->getArg(0));
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
    Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tbegin_nofloat: {
    Value *TDB = EmitScalarExpr(E->getArg(0));
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
    Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tbeginc: {
    Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
    Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
    Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
    return Builder.CreateCall(F, {TDB, Control});
  }
  case SystemZ::BI__builtin_tabort: {
    Value *Data = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
    return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
  }
  case SystemZ::BI__builtin_non_tx_store: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Data = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
    return Builder.CreateCall(F, {Data, Address});
  }

  // Vector builtins. Note that most vector builtins are mapped automatically
  // to target-specific LLVM intrinsics. The ones handled specially here can
  // be represented via standard LLVM IR, which is preferable to enable common
  // LLVM optimizations.

  case SystemZ::BI__builtin_s390_vpopctb:
  case SystemZ::BI__builtin_s390_vpopcth:
  case SystemZ::BI__builtin_s390_vpopctf:
  case SystemZ::BI__builtin_s390_vpopctg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
    return Builder.CreateCall(F, X);
  }

  case SystemZ::BI__builtin_s390_vclzb:
  case SystemZ::BI__builtin_s390_vclzh:
  case SystemZ::BI__builtin_s390_vclzf:
  case SystemZ::BI__builtin_s390_vclzg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }

  case SystemZ::BI__builtin_s390_vctzb:
  case SystemZ::BI__builtin_s390_vctzh:
  case SystemZ::BI__builtin_s390_vctzf:
  case SystemZ::BI__builtin_s390_vctzg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
    return Builder.CreateCall(F, {X, Undef});
  }
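  // Illustrative lowering (sketch only): __builtin_s390_vclzg(x) is expected
  // to become roughly
  //   call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %x, i1 false)
  // where the trailing i1 false asks for defined behaviour on zero inputs.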

  case SystemZ::BI__builtin_s390_vfsqsb:
  case SystemZ::BI__builtin_s390_vfsqdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
      return Builder.CreateConstrainedFPCall(F, { X });
    } else {
      Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
      return Builder.CreateCall(F, X);
    }
  }
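  // Note: when strict floating point is in effect (e.g. under
  // -ffp-exception-behavior=strict), the constrained branch above is taken and
  // llvm.experimental.constrained.sqrt is emitted instead of llvm.sqrt; the
  // same pattern repeats for the FMA-based builtins below.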
  case SystemZ::BI__builtin_s390_vfmasb:
  case SystemZ::BI__builtin_s390_vfmadb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
      return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
    } else {
      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
      return Builder.CreateCall(F, {X, Y, Z});
    }
  }
  case SystemZ::BI__builtin_s390_vfmssb:
  case SystemZ::BI__builtin_s390_vfmsdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
      return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
    } else {
      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
      return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
    }
  }
  case SystemZ::BI__builtin_s390_vfnmasb:
  case SystemZ::BI__builtin_s390_vfnmadb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
      return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
    } else {
      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
    }
  }
  case SystemZ::BI__builtin_s390_vfnmssb:
  case SystemZ::BI__builtin_s390_vfnmsdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    Value *Z = EmitScalarExpr(E->getArg(2));
    if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
      Value *NegZ = Builder.CreateFNeg(Z, "sub");
      return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
    } else {
      Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
      Value *NegZ = Builder.CreateFNeg(Z, "neg");
      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
    }
  }
  case SystemZ::BI__builtin_s390_vflpsb:
  case SystemZ::BI__builtin_s390_vflpdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateCall(F, X);
  }
  case SystemZ::BI__builtin_s390_vflnsb:
  case SystemZ::BI__builtin_s390_vflndb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
    return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
  }
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    // Constant-fold the M4 and M5 mask arguments.
    llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
    llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via a LLVM standard
    // intrinsic. We only support some combinations of M4 and M5.
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    Intrinsic::ID CI;
    switch (M4.getZExtValue()) {
    default: break;
    case 0:  // IEEE-inexact exception allowed
      switch (M5.getZExtValue()) {
      default: break;
      case 0: ID = Intrinsic::rint;
              CI = Intrinsic::experimental_constrained_rint; break;
      }
      break;
    case 4:  // IEEE-inexact exception suppressed
      switch (M5.getZExtValue()) {
      default: break;
      case 0: ID = Intrinsic::nearbyint;
              CI = Intrinsic::experimental_constrained_nearbyint; break;
      case 1: ID = Intrinsic::round;
              CI = Intrinsic::experimental_constrained_round; break;
      case 5: ID = Intrinsic::trunc;
              CI = Intrinsic::experimental_constrained_trunc; break;
      case 6: ID = Intrinsic::ceil;
              CI = Intrinsic::experimental_constrained_ceil; break;
      case 7: ID = Intrinsic::floor;
              CI = Intrinsic::experimental_constrained_floor; break;
      }
      break;
    }
    if (ID != Intrinsic::not_intrinsic) {
      if (Builder.getIsFPConstrained()) {
        Function *F = CGM.getIntrinsic(CI, ResultType);
        return Builder.CreateConstrainedFPCall(F, X);
      } else {
        Function *F = CGM.getIntrinsic(ID, ResultType);
        return Builder.CreateCall(F, X);
      }
    }
    switch (BuiltinID) { // FIXME: constrained version?
      case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
      case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
      default: llvm_unreachable("Unknown BuiltinID");
    }
    Function *F = CGM.getIntrinsic(ID);
    Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
    Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
    return Builder.CreateCall(F, {X, M4Value, M5Value});
  }
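  // Illustrative mapping (sketch only): __builtin_s390_vfidb(v, 4, 7), i.e.
  // M4 = 4 (inexact suppressed) and M5 = 7, is expected to lower to
  //   call <2 x double> @llvm.floor.v2f64(<2 x double> %v)
  // while unsupported M4/M5 combinations fall back to llvm.s390.vfidb.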
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmaxdb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    // Constant-fold the M4 mask argument.
    llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via a LLVM standard
    // intrinsic. We only support some values of M4.
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    Intrinsic::ID CI;
    switch (M4.getZExtValue()) {
    default: break;
    case 4: ID = Intrinsic::maxnum;
            CI = Intrinsic::experimental_constrained_maxnum; break;
    }
    if (ID != Intrinsic::not_intrinsic) {
      if (Builder.getIsFPConstrained()) {
        Function *F = CGM.getIntrinsic(CI, ResultType);
        return Builder.CreateConstrainedFPCall(F, {X, Y});
      } else {
        Function *F = CGM.getIntrinsic(ID, ResultType);
        return Builder.CreateCall(F, {X, Y});
      }
    }
    switch (BuiltinID) {
      case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
      case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
      default: llvm_unreachable("Unknown BuiltinID");
    }
    Function *F = CGM.getIntrinsic(ID);
    Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
    return Builder.CreateCall(F, {X, Y, M4Value});
  }
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmindb: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Value *Y = EmitScalarExpr(E->getArg(1));
    // Constant-fold the M4 mask argument.
    llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via a LLVM standard
    // intrinsic. We only support some values of M4.
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    Intrinsic::ID CI;
    switch (M4.getZExtValue()) {
    default: break;
    case 4: ID = Intrinsic::minnum;
            CI = Intrinsic::experimental_constrained_minnum; break;
    }
    if (ID != Intrinsic::not_intrinsic) {
      if (Builder.getIsFPConstrained()) {
        Function *F = CGM.getIntrinsic(CI, ResultType);
        return Builder.CreateConstrainedFPCall(F, {X, Y});
      } else {
        Function *F = CGM.getIntrinsic(ID, ResultType);
        return Builder.CreateCall(F, {X, Y});
      }
    }
    switch (BuiltinID) {
      case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
      case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
      default: llvm_unreachable("Unknown BuiltinID");
    }
    Function *F = CGM.getIntrinsic(ID);
    Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
    return Builder.CreateCall(F, {X, Y, M4Value});
  }
  case SystemZ::BI__builtin_s390_vlbrh:
  case SystemZ::BI__builtin_s390_vlbrf:
  case SystemZ::BI__builtin_s390_vlbrg: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *X = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
    return Builder.CreateCall(F, X);
  }

  // Vector intrinsics that output the post-instruction CC value.

#define INTRINSIC_WITH_CC(NAME) \
  case SystemZ::BI__builtin_##NAME: \
    return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
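// Illustrative expansion of the macro above:
//   INTRINSIC_WITH_CC(s390_vpkshs);
// becomes
//   case SystemZ::BI__builtin_s390_vpkshs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);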

  INTRINSIC_WITH_CC(s390_vpkshs);
  INTRINSIC_WITH_CC(s390_vpksfs);
  INTRINSIC_WITH_CC(s390_vpksgs);

  INTRINSIC_WITH_CC(s390_vpklshs);
  INTRINSIC_WITH_CC(s390_vpklsfs);
  INTRINSIC_WITH_CC(s390_vpklsgs);

  INTRINSIC_WITH_CC(s390_vceqbs);
  INTRINSIC_WITH_CC(s390_vceqhs);
  INTRINSIC_WITH_CC(s390_vceqfs);
  INTRINSIC_WITH_CC(s390_vceqgs);

  INTRINSIC_WITH_CC(s390_vchbs);
  INTRINSIC_WITH_CC(s390_vchhs);
  INTRINSIC_WITH_CC(s390_vchfs);
  INTRINSIC_WITH_CC(s390_vchgs);

  INTRINSIC_WITH_CC(s390_vchlbs);
  INTRINSIC_WITH_CC(s390_vchlhs);
  INTRINSIC_WITH_CC(s390_vchlfs);
  INTRINSIC_WITH_CC(s390_vchlgs);

  INTRINSIC_WITH_CC(s390_vfaebs);
  INTRINSIC_WITH_CC(s390_vfaehs);
  INTRINSIC_WITH_CC(s390_vfaefs);

  INTRINSIC_WITH_CC(s390_vfaezbs);
  INTRINSIC_WITH_CC(s390_vfaezhs);
  INTRINSIC_WITH_CC(s390_vfaezfs);

  INTRINSIC_WITH_CC(s390_vfeebs);
  INTRINSIC_WITH_CC(s390_vfeehs);
  INTRINSIC_WITH_CC(s390_vfeefs);

  INTRINSIC_WITH_CC(s390_vfeezbs);
  INTRINSIC_WITH_CC(s390_vfeezhs);
  INTRINSIC_WITH_CC(s390_vfeezfs);

  INTRINSIC_WITH_CC(s390_vfenebs);
  INTRINSIC_WITH_CC(s390_vfenehs);
  INTRINSIC_WITH_CC(s390_vfenefs);

  INTRINSIC_WITH_CC(s390_vfenezbs);
  INTRINSIC_WITH_CC(s390_vfenezhs);
  INTRINSIC_WITH_CC(s390_vfenezfs);

  INTRINSIC_WITH_CC(s390_vistrbs);
  INTRINSIC_WITH_CC(s390_vistrhs);
  INTRINSIC_WITH_CC(s390_vistrfs);

  INTRINSIC_WITH_CC(s390_vstrcbs);
  INTRINSIC_WITH_CC(s390_vstrchs);
  INTRINSIC_WITH_CC(s390_vstrcfs);

  INTRINSIC_WITH_CC(s390_vstrczbs);
  INTRINSIC_WITH_CC(s390_vstrczhs);
  INTRINSIC_WITH_CC(s390_vstrczfs);

  INTRINSIC_WITH_CC(s390_vfcesbs);
  INTRINSIC_WITH_CC(s390_vfcedbs);
  INTRINSIC_WITH_CC(s390_vfchsbs);
  INTRINSIC_WITH_CC(s390_vfchdbs);
  INTRINSIC_WITH_CC(s390_vfchesbs);
  INTRINSIC_WITH_CC(s390_vfchedbs);

  INTRINSIC_WITH_CC(s390_vftcisb);
  INTRINSIC_WITH_CC(s390_vftcidb);

  INTRINSIC_WITH_CC(s390_vstrsb);
  INTRINSIC_WITH_CC(s390_vstrsh);
  INTRINSIC_WITH_CC(s390_vstrsf);

  INTRINSIC_WITH_CC(s390_vstrszb);
  INTRINSIC_WITH_CC(s390_vstrszh);
  INTRINSIC_WITH_CC(s390_vstrszf);

#undef INTRINSIC_WITH_CC

  default:
    return nullptr;
  }
}

// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
struct NVPTXMmaLdstInfo {
  unsigned NumResults;  // Number of elements to load/store
  // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
  unsigned IID_col;
  unsigned IID_row;
};

#define MMA_INTR(geom_op_type, layout) \
  Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
#define MMA_LDST(n, geom_op_type) \
  { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }

static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
  switch (BuiltinID) {
  // FP MMA loads
  case NVPTX::BI__hmma_m16n16k16_ld_a:
    return MMA_LDST(8, m16n16k16_load_a_f16);
  case NVPTX::BI__hmma_m16n16k16_ld_b:
    return MMA_LDST(8, m16n16k16_load_b_f16);
  case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
    return MMA_LDST(4, m16n16k16_load_c_f16);
  case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
    return MMA_LDST(8, m16n16k16_load_c_f32);
  case NVPTX::BI__hmma_m32n8k16_ld_a:
    return MMA_LDST(8, m32n8k16_load_a_f16);
  case NVPTX::BI__hmma_m32n8k16_ld_b:
    return MMA_LDST(8, m32n8k16_load_b_f16);
  case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
    return MMA_LDST(4, m32n8k16_load_c_f16);
  case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
    return MMA_LDST(8, m32n8k16_load_c_f32);
  case NVPTX::BI__hmma_m8n32k16_ld_a:
    return MMA_LDST(8, m8n32k16_load_a_f16);
  case NVPTX::BI__hmma_m8n32k16_ld_b:
    return MMA_LDST(8, m8n32k16_load_b_f16);
  case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
    return MMA_LDST(4, m8n32k16_load_c_f16);
  case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
    return MMA_LDST(8, m8n32k16_load_c_f32);

  // Integer MMA loads
  case NVPTX::BI__imma_m16n16k16_ld_a_s8:
    return MMA_LDST(2, m16n16k16_load_a_s8);
  case NVPTX::BI__imma_m16n16k16_ld_a_u8:
    return MMA_LDST(2, m16n16k16_load_a_u8);
  case NVPTX::BI__imma_m16n16k16_ld_b_s8:
    return MMA_LDST(2, m16n16k16_load_b_s8);
  case NVPTX::BI__imma_m16n16k16_ld_b_u8:
    return MMA_LDST(2, m16n16k16_load_b_u8);
  case NVPTX::BI__imma_m16n16k16_ld_c:
    return MMA_LDST(8, m16n16k16_load_c_s32);
  case NVPTX::BI__imma_m32n8k16_ld_a_s8:
    return MMA_LDST(4, m32n8k16_load_a_s8);
  case NVPTX::BI__imma_m32n8k16_ld_a_u8:
    return MMA_LDST(4, m32n8k16_load_a_u8);
  case NVPTX::BI__imma_m32n8k16_ld_b_s8:
    return MMA_LDST(1, m32n8k16_load_b_s8);
  case NVPTX::BI__imma_m32n8k16_ld_b_u8:
    return MMA_LDST(1, m32n8k16_load_b_u8);
  case NVPTX::BI__imma_m32n8k16_ld_c:
    return MMA_LDST(8, m32n8k16_load_c_s32);
  case NVPTX::BI__imma_m8n32k16_ld_a_s8:
    return MMA_LDST(1, m8n32k16_load_a_s8);
  case NVPTX::BI__imma_m8n32k16_ld_a_u8:
    return MMA_LDST(1, m8n32k16_load_a_u8);
  case NVPTX::BI__imma_m8n32k16_ld_b_s8:
    return MMA_LDST(4, m8n32k16_load_b_s8);
  case NVPTX::BI__imma_m8n32k16_ld_b_u8:
    return MMA_LDST(4, m8n32k16_load_b_u8);
  case NVPTX::BI__imma_m8n32k16_ld_c:
    return MMA_LDST(8, m8n32k16_load_c_s32);

  // Sub-integer MMA loads.
  // Only row/col layout is supported by A/B fragments.
  case NVPTX::BI__imma_m8n8k32_ld_a_s4:
    return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
  case NVPTX::BI__imma_m8n8k32_ld_a_u4:
    return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
  case NVPTX::BI__imma_m8n8k32_ld_b_s4:
    return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
  case NVPTX::BI__imma_m8n8k32_ld_b_u4:
    return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
  case NVPTX::BI__imma_m8n8k32_ld_c:
    return MMA_LDST(2, m8n8k32_load_c_s32);
  case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
    return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
  case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
    return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
  case NVPTX::BI__bmma_m8n8k128_ld_c:
    return MMA_LDST(2, m8n8k128_load_c_s32);

  // Double MMA loads
  case NVPTX::BI__dmma_m8n8k4_ld_a:
    return MMA_LDST(1, m8n8k4_load_a_f64);
  case NVPTX::BI__dmma_m8n8k4_ld_b:
    return MMA_LDST(1, m8n8k4_load_b_f64);
  case NVPTX::BI__dmma_m8n8k4_ld_c:
    return MMA_LDST(2, m8n8k4_load_c_f64);

  // Alternate float MMA loads
  case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
    return MMA_LDST(4, m16n16k16_load_a_bf16);
  case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
    return MMA_LDST(4, m16n16k16_load_b_bf16);
  case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
    return MMA_LDST(2, m8n32k16_load_a_bf16);
  case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
    return MMA_LDST(8, m8n32k16_load_b_bf16);
  case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
    return MMA_LDST(8, m32n8k16_load_a_bf16);
  case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
    return MMA_LDST(2, m32n8k16_load_b_bf16);
  case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
    return MMA_LDST(4, m16n16k8_load_a_tf32);
  case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
    return MMA_LDST(4, m16n16k8_load_b_tf32);
  case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
    return MMA_LDST(8, m16n16k8_load_c_f32);

  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
  // Unlike PTX and LLVM IR where stores always use fragment D, NVCC builtins
  // always use fragment C for both loads and stores.
  // FP MMA stores.
  case NVPTX::BI__hmma_m16n16k16_st_c_f16:
    return MMA_LDST(4, m16n16k16_store_d_f16);
  case NVPTX::BI__hmma_m16n16k16_st_c_f32:
    return MMA_LDST(8, m16n16k16_store_d_f32);
  case NVPTX::BI__hmma_m32n8k16_st_c_f16:
    return MMA_LDST(4, m32n8k16_store_d_f16);
  case NVPTX::BI__hmma_m32n8k16_st_c_f32:
    return MMA_LDST(8, m32n8k16_store_d_f32);
  case NVPTX::BI__hmma_m8n32k16_st_c_f16:
    return MMA_LDST(4, m8n32k16_store_d_f16);
  case NVPTX::BI__hmma_m8n32k16_st_c_f32:
    return MMA_LDST(8, m8n32k16_store_d_f32);

  // Integer and sub-integer MMA stores.
  // Another naming quirk. Unlike other MMA builtins that use PTX types in the
  // name, integer loads/stores use LLVM's i32.
  case NVPTX::BI__imma_m16n16k16_st_c_i32:
    return MMA_LDST(8, m16n16k16_store_d_s32);
  case NVPTX::BI__imma_m32n8k16_st_c_i32:
    return MMA_LDST(8, m32n8k16_store_d_s32);
  case NVPTX::BI__imma_m8n32k16_st_c_i32:
    return MMA_LDST(8, m8n32k16_store_d_s32);
  case NVPTX::BI__imma_m8n8k32_st_c_i32:
    return MMA_LDST(2, m8n8k32_store_d_s32);
  case NVPTX::BI__bmma_m8n8k128_st_c_i32:
    return MMA_LDST(2, m8n8k128_store_d_s32);

  // Double MMA store
  case NVPTX::BI__dmma_m8n8k4_st_c_f64:
    return MMA_LDST(2, m8n8k4_store_d_f64);

  // Alternate float MMA store
  case NVPTX::BI__mma_m16n16k8_st_c_f32:
    return MMA_LDST(8, m16n16k8_store_d_f32);

  default:
    llvm_unreachable("Unknown MMA builtin");
  }
}
#undef MMA_LDST
#undef MMA_INTR

struct NVPTXMmaInfo {
  unsigned NumEltsA;
  unsigned NumEltsB;
  unsigned NumEltsC;
  unsigned NumEltsD;

  // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
  // over 'col' for layout. The index of non-satf variants is expected to match
  // the undocumented layout constants used by CUDA's mma.hpp.
  std::array<unsigned, 8> Variants;

  unsigned getMMAIntrinsic(int Layout, bool Satf) {
    unsigned Index = Layout + 4 * Satf;
    if (Index >= Variants.size())
      return 0;
    return Variants[Index];
  }
};
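// Illustrative index math for getMMAIntrinsic above (sketch only): with the
// layout numbering row_row=0, row_col=1, col_row=2, col_col=3,
//   getMMAIntrinsic(/*Layout=*/1, /*Satf=*/true)
// reads Variants[1 + 4 * 1] == Variants[5], the row_col ..._satfinite variant.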

// Returns an intrinsic that matches Layout and Satf for valid combinations of
// Layout and Satf, 0 otherwise.
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
  // clang-format off
#define MMA_VARIANTS(geom, type)                                    \
      Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type,             \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type,             \
      Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type,             \
      Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
#define MMA_SATF_VARIANTS(geom, type)                               \
      MMA_VARIANTS(geom, type),                                     \
      Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
      Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
// Sub-integer MMA only supports row.col layout.
#define MMA_VARIANTS_I4(geom, type) \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type,             \
      0, \
      0, \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
      0, \
      0
// b1 MMA does not support .satfinite.
#define MMA_VARIANTS_B1_XOR(geom, type) \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type,    \
      0, \
      0, \
      0, \
      0, \
      0, \
      0
#define MMA_VARIANTS_B1_AND(geom, type) \
      0, \
      Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type,    \
      0, \
      0, \
      0, \
      0, \
      0, \
      0
  // clang-format on
  switch (BuiltinID) {
  // FP MMA
  // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
  // NumEltsN of return value are ordered as A,B,C,D.
  case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
    return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
  case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
    return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
  case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
    return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
  case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
    return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
  case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
    return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
  case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
    return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
  case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
    return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
  case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
    return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
  case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
    return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
  case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
    return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
  case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
    return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
  case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
    return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};

  // Integer MMA
  case NVPTX::BI__imma_m16n16k16_mma_s8:
    return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
  case NVPTX::BI__imma_m16n16k16_mma_u8:
    return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
  case NVPTX::BI__imma_m32n8k16_mma_s8:
    return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
  case NVPTX::BI__imma_m32n8k16_mma_u8:
    return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
  case NVPTX::BI__imma_m8n32k16_mma_s8:
    return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
  case NVPTX::BI__imma_m8n32k16_mma_u8:
    return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};

  // Sub-integer MMA
  case NVPTX::BI__imma_m8n8k32_mma_s4:
    return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
  case NVPTX::BI__imma_m8n8k32_mma_u4:
    return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
  case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
    return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
  case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
    return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};

  // Double MMA
  case NVPTX::BI__dmma_m8n8k4_mma_f64:
    return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};

  // Alternate FP MMA
  case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
    return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
  case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
    return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
  case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
    return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
  case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
    return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
  default:
    llvm_unreachable("Unexpected builtin ID.");
  }
#undef MMA_VARIANTS
#undef MMA_SATF_VARIANTS
#undef MMA_VARIANTS_I4
#undef MMA_VARIANTS_B1_AND
#undef MMA_VARIANTS_B1_XOR
}

static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
                         const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ArgType = E->getArg(0)->getType();
  clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
  llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
  return CGF.Builder.CreateCall(
      CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
      {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
}
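// Illustrative lowering via MakeLdgLdu (sketch only): __nvvm_ldg_f4(p) is
// expected to become roughly
//   call <4 x float> @llvm.nvvm.ldg.global.f.v4f32.p0(ptr %p, i32 16)
// where the trailing i32 is the natural alignment of the pointee type.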

static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
                               const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Type *ElemTy =
      CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
  return CGF.Builder.CreateCall(
      CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
      {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
}

static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
                          CodeGenFunction &CGF, const CallExpr *E,
                          int SrcSize) {
  return E->getNumArgs() == 3
             ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
                                      {CGF.EmitScalarExpr(E->getArg(0)),
                                       CGF.EmitScalarExpr(E->getArg(1)),
                                       CGF.EmitScalarExpr(E->getArg(2))})
             : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
                                      {CGF.EmitScalarExpr(E->getArg(0)),
                                       CGF.EmitScalarExpr(E->getArg(1))});
}

static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
                           const CallExpr *E, CodeGenFunction &CGF) {
  auto &C = CGF.CGM.getContext();
  if (!(C.getLangOpts().NativeHalfType ||
        !C.getTargetInfo().useFP16ConversionIntrinsics())) {
    CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() +
                                       " requires native half type support.");
    return nullptr;
  }

  if (IntrinsicID == Intrinsic::nvvm_ldg_global_f ||
      IntrinsicID == Intrinsic::nvvm_ldu_global_f)
    return MakeLdgLdu(IntrinsicID, CGF, E);

  SmallVector<Value *, 16> Args;
  auto *F = CGF.CGM.getIntrinsic(IntrinsicID);
  auto *FTy = F->getFunctionType();
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  C.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");
  for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
    assert((ICEArguments & (1 << i)) == 0);
    auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
    auto *PTy = FTy->getParamType(i);
    if (PTy != ArgValue->getType())
      ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
    Args.push_back(ArgValue);
  }

  return CGF.Builder.CreateCall(F, Args);
}

Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E) {
  switch (BuiltinID) {
  case NVPTX::BI__nvvm_atom_add_gen_i:
  case NVPTX::BI__nvvm_atom_add_gen_l:
  case NVPTX::BI__nvvm_atom_add_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);

  case NVPTX::BI__nvvm_atom_sub_gen_i:
  case NVPTX::BI__nvvm_atom_sub_gen_l:
  case NVPTX::BI__nvvm_atom_sub_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);

  case NVPTX::BI__nvvm_atom_and_gen_i:
  case NVPTX::BI__nvvm_atom_and_gen_l:
  case NVPTX::BI__nvvm_atom_and_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);

  case NVPTX::BI__nvvm_atom_or_gen_i:
  case NVPTX::BI__nvvm_atom_or_gen_l:
  case NVPTX::BI__nvvm_atom_or_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);

  case NVPTX::BI__nvvm_atom_xor_gen_i:
  case NVPTX::BI__nvvm_atom_xor_gen_l:
  case NVPTX::BI__nvvm_atom_xor_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);

  case NVPTX::BI__nvvm_atom_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_xchg_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);

  case NVPTX::BI__nvvm_atom_max_gen_i:
  case NVPTX::BI__nvvm_atom_max_gen_l:
  case NVPTX::BI__nvvm_atom_max_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);

  case NVPTX::BI__nvvm_atom_max_gen_ui:
  case NVPTX::BI__nvvm_atom_max_gen_ul:
  case NVPTX::BI__nvvm_atom_max_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);

  case NVPTX::BI__nvvm_atom_min_gen_i:
  case NVPTX::BI__nvvm_atom_min_gen_l:
  case NVPTX::BI__nvvm_atom_min_gen_ll:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);

  case NVPTX::BI__nvvm_atom_min_gen_ui:
  case NVPTX::BI__nvvm_atom_min_gen_ul:
  case NVPTX::BI__nvvm_atom_min_gen_ull:
    return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);

  case NVPTX::BI__nvvm_atom_cas_gen_i:
  case NVPTX::BI__nvvm_atom_cas_gen_l:
  case NVPTX::BI__nvvm_atom_cas_gen_ll:
    // __nvvm_atom_cas_gen_* should return the old value rather than the
    // success flag.
    return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
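  // Illustrative lowering (sketch only): __nvvm_atom_add_gen_i(p, v) is
  // expected to become roughly
  //   atomicrmw add ptr %p, i32 %v seq_cst
  // via MakeBinaryAtomicValue, and the CAS builtins above likewise yield the
  // old value at the location rather than a success flag.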

  case NVPTX::BI__nvvm_atom_add_gen_f:
  case NVPTX::BI__nvvm_atom_add_gen_d: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
                                   AtomicOrdering::SequentiallyConsistent);
  }

  case NVPTX::BI__nvvm_atom_inc_gen_ui: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Function *FnALI32 =
        CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
    return Builder.CreateCall(FnALI32, {Ptr, Val});
  }

  case NVPTX::BI__nvvm_atom_dec_gen_ui: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Function *FnALD32 =
        CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
    return Builder.CreateCall(FnALD32, {Ptr, Val});
  }
:
18955 case NVPTX::BI__nvvm_ldg_sc
:
18956 case NVPTX::BI__nvvm_ldg_c2
:
18957 case NVPTX::BI__nvvm_ldg_sc2
:
18958 case NVPTX::BI__nvvm_ldg_c4
:
18959 case NVPTX::BI__nvvm_ldg_sc4
:
18960 case NVPTX::BI__nvvm_ldg_s
:
18961 case NVPTX::BI__nvvm_ldg_s2
:
18962 case NVPTX::BI__nvvm_ldg_s4
:
18963 case NVPTX::BI__nvvm_ldg_i
:
18964 case NVPTX::BI__nvvm_ldg_i2
:
18965 case NVPTX::BI__nvvm_ldg_i4
:
18966 case NVPTX::BI__nvvm_ldg_l
:
18967 case NVPTX::BI__nvvm_ldg_l2
:
18968 case NVPTX::BI__nvvm_ldg_ll
:
18969 case NVPTX::BI__nvvm_ldg_ll2
:
18970 case NVPTX::BI__nvvm_ldg_uc
:
18971 case NVPTX::BI__nvvm_ldg_uc2
:
18972 case NVPTX::BI__nvvm_ldg_uc4
:
18973 case NVPTX::BI__nvvm_ldg_us
:
18974 case NVPTX::BI__nvvm_ldg_us2
:
18975 case NVPTX::BI__nvvm_ldg_us4
:
18976 case NVPTX::BI__nvvm_ldg_ui
:
18977 case NVPTX::BI__nvvm_ldg_ui2
:
18978 case NVPTX::BI__nvvm_ldg_ui4
:
18979 case NVPTX::BI__nvvm_ldg_ul
:
18980 case NVPTX::BI__nvvm_ldg_ul2
:
18981 case NVPTX::BI__nvvm_ldg_ull
:
18982 case NVPTX::BI__nvvm_ldg_ull2
:
18983 // PTX Interoperability section 2.2: "For a vector with an even number of
18984 // elements, its alignment is set to number of elements times the alignment
18985 // of its member: n*alignof(t)."
18986 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_i
, *this, E
);
18987 case NVPTX::BI__nvvm_ldg_f
:
18988 case NVPTX::BI__nvvm_ldg_f2
:
18989 case NVPTX::BI__nvvm_ldg_f4
:
18990 case NVPTX::BI__nvvm_ldg_d
:
18991 case NVPTX::BI__nvvm_ldg_d2
:
18992 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_f
, *this, E
);
18994 case NVPTX::BI__nvvm_ldu_c
:
18995 case NVPTX::BI__nvvm_ldu_sc
:
18996 case NVPTX::BI__nvvm_ldu_c2
:
18997 case NVPTX::BI__nvvm_ldu_sc2
:
18998 case NVPTX::BI__nvvm_ldu_c4
:
18999 case NVPTX::BI__nvvm_ldu_sc4
:
19000 case NVPTX::BI__nvvm_ldu_s
:
19001 case NVPTX::BI__nvvm_ldu_s2
:
19002 case NVPTX::BI__nvvm_ldu_s4
:
19003 case NVPTX::BI__nvvm_ldu_i
:
19004 case NVPTX::BI__nvvm_ldu_i2
:
19005 case NVPTX::BI__nvvm_ldu_i4
:
19006 case NVPTX::BI__nvvm_ldu_l
:
19007 case NVPTX::BI__nvvm_ldu_l2
:
19008 case NVPTX::BI__nvvm_ldu_ll
:
19009 case NVPTX::BI__nvvm_ldu_ll2
:
19010 case NVPTX::BI__nvvm_ldu_uc
:
19011 case NVPTX::BI__nvvm_ldu_uc2
:
19012 case NVPTX::BI__nvvm_ldu_uc4
:
19013 case NVPTX::BI__nvvm_ldu_us
:
19014 case NVPTX::BI__nvvm_ldu_us2
:
19015 case NVPTX::BI__nvvm_ldu_us4
:
19016 case NVPTX::BI__nvvm_ldu_ui
:
19017 case NVPTX::BI__nvvm_ldu_ui2
:
19018 case NVPTX::BI__nvvm_ldu_ui4
:
19019 case NVPTX::BI__nvvm_ldu_ul
:
19020 case NVPTX::BI__nvvm_ldu_ul2
:
19021 case NVPTX::BI__nvvm_ldu_ull
:
19022 case NVPTX::BI__nvvm_ldu_ull2
:
19023 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_i
, *this, E
);
19024 case NVPTX::BI__nvvm_ldu_f
:
19025 case NVPTX::BI__nvvm_ldu_f2
:
19026 case NVPTX::BI__nvvm_ldu_f4
:
19027 case NVPTX::BI__nvvm_ldu_d
:
19028 case NVPTX::BI__nvvm_ldu_d2
:
19029 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_f
, *this, E
);
19031 case NVPTX::BI__nvvm_atom_cta_add_gen_i
:
19032 case NVPTX::BI__nvvm_atom_cta_add_gen_l
:
19033 case NVPTX::BI__nvvm_atom_cta_add_gen_ll
:
19034 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta
, *this, E
);
19035 case NVPTX::BI__nvvm_atom_sys_add_gen_i
:
  case NVPTX::BI__nvvm_atom_sys_add_gen_l:
  case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_add_gen_f:
  case NVPTX::BI__nvvm_atom_cta_add_gen_d:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_add_gen_f:
  case NVPTX::BI__nvvm_atom_sys_add_gen_d:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
  case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_max_gen_i:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
  case NVPTX::BI__nvvm_atom_cta_max_gen_l:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
  case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_max_gen_i:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
  case NVPTX::BI__nvvm_atom_sys_max_gen_l:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
  case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_min_gen_i:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
  case NVPTX::BI__nvvm_atom_cta_min_gen_l:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
  case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_min_gen_i:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
  case NVPTX::BI__nvvm_atom_sys_min_gen_l:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
  case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_and_gen_i:
  case NVPTX::BI__nvvm_atom_cta_and_gen_l:
  case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_and_gen_i:
  case NVPTX::BI__nvvm_atom_sys_and_gen_l:
  case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_or_gen_i:
  case NVPTX::BI__nvvm_atom_cta_or_gen_l:
  case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_or_gen_i:
  case NVPTX::BI__nvvm_atom_sys_or_gen_l:
  case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
  case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
  case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
  case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
  case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
  case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
    return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
  case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
  case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
  case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    llvm::Type *ElemTy =
        ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
    return Builder.CreateCall(
        CGM.getIntrinsic(
            Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
        {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
  }
  case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
  case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
  case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    llvm::Type *ElemTy =
        ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
    return Builder.CreateCall(
        CGM.getIntrinsic(
            Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
        {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
  }
  case NVPTX::BI__nvvm_match_all_sync_i32p:
  case NVPTX::BI__nvvm_match_all_sync_i64p: {
    Value *Mask = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
    Value *ResultPair = Builder.CreateCall(
        CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
                             ? Intrinsic::nvvm_match_all_sync_i32p
                             : Intrinsic::nvvm_match_all_sync_i64p),
        {Mask, Val});
    Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
                                     PredOutPtr.getElementType());
    Builder.CreateStore(Pred, PredOutPtr);
    return Builder.CreateExtractValue(ResultPair, 0);
  }
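  // Illustrative sketch (not in the original source): at the CUDA level a call
  // such as
  //   int pred;
  //   unsigned mask = __nvvm_match_all_sync_i32p(__activemask(), v, &pred);
  // (exact prototype assumed for exposition) would take the path above: the
  // intrinsic returns a {value, predicate} pair, the zero-extended predicate
  // is stored through the third argument, and the first element is returned.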
  case NVPTX::BI__hmma_m16n16k16_ld_a:
  case NVPTX::BI__hmma_m16n16k16_ld_b:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
  case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
  case NVPTX::BI__hmma_m32n8k16_ld_a:
  case NVPTX::BI__hmma_m32n8k16_ld_b:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
  case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
  case NVPTX::BI__hmma_m8n32k16_ld_a:
  case NVPTX::BI__hmma_m8n32k16_ld_b:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
  case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
  // Integer MMA loads.
  case NVPTX::BI__imma_m16n16k16_ld_a_s8:
  case NVPTX::BI__imma_m16n16k16_ld_a_u8:
  case NVPTX::BI__imma_m16n16k16_ld_b_s8:
  case NVPTX::BI__imma_m16n16k16_ld_b_u8:
  case NVPTX::BI__imma_m16n16k16_ld_c:
  case NVPTX::BI__imma_m32n8k16_ld_a_s8:
  case NVPTX::BI__imma_m32n8k16_ld_a_u8:
  case NVPTX::BI__imma_m32n8k16_ld_b_s8:
  case NVPTX::BI__imma_m32n8k16_ld_b_u8:
  case NVPTX::BI__imma_m32n8k16_ld_c:
  case NVPTX::BI__imma_m8n32k16_ld_a_s8:
  case NVPTX::BI__imma_m8n32k16_ld_a_u8:
  case NVPTX::BI__imma_m8n32k16_ld_b_s8:
  case NVPTX::BI__imma_m8n32k16_ld_b_u8:
  case NVPTX::BI__imma_m8n32k16_ld_c:
  // Sub-integer MMA loads.
  case NVPTX::BI__imma_m8n8k32_ld_a_s4:
  case NVPTX::BI__imma_m8n8k32_ld_a_u4:
  case NVPTX::BI__imma_m8n8k32_ld_b_s4:
  case NVPTX::BI__imma_m8n8k32_ld_b_u4:
  case NVPTX::BI__imma_m8n8k32_ld_c:
  case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_c:
  // Double MMA loads.
  case NVPTX::BI__dmma_m8n8k4_ld_a:
  case NVPTX::BI__dmma_m8n8k4_ld_b:
  case NVPTX::BI__dmma_m8n8k4_ld_c:
  // Alternate float MMA loads.
  case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
  case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
  case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
  case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
  case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
  case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
  case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
  case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
  case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    std::optional<llvm::APSInt> isColMajorArg =
        E->getArg(3)->getIntegerConstantExpr(getContext());
    if (!isColMajorArg)
      return nullptr;
    bool isColMajor = isColMajorArg->getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;

    Value *Result =
        Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});

    // Save returned values.
    assert(II.NumResults);
    if (II.NumResults == 1) {
      Builder.CreateAlignedStore(Result, Dst.getPointer(),
                                 CharUnits::fromQuantity(4));
    } else {
      for (unsigned i = 0; i < II.NumResults; ++i) {
        Builder.CreateAlignedStore(
            Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
                                  Dst.getElementType()),
            Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
                              llvm::ConstantInt::get(IntTy, i)),
            CharUnits::fromQuantity(4));
      }
    }
    return Result;
  }
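  // Illustrative sketch (not in the original source): a CUDA-level WMMA load
  // such as
  //   int frag[8];
  //   __hmma_m16n16k16_ld_a(frag, src, /*ldm=*/16, /*isColMajor=*/0);
  // (names and exact prototype assumed for exposition) would take this path:
  // the row- or column-major intrinsic is selected from the ldst table and
  // each returned element is stored back into the caller's fragment array.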
  case NVPTX::BI__hmma_m16n16k16_st_c_f16:
  case NVPTX::BI__hmma_m16n16k16_st_c_f32:
  case NVPTX::BI__hmma_m32n8k16_st_c_f16:
  case NVPTX::BI__hmma_m32n8k16_st_c_f32:
  case NVPTX::BI__hmma_m8n32k16_st_c_f16:
  case NVPTX::BI__hmma_m8n32k16_st_c_f32:
  case NVPTX::BI__imma_m16n16k16_st_c_i32:
  case NVPTX::BI__imma_m32n8k16_st_c_i32:
  case NVPTX::BI__imma_m8n32k16_st_c_i32:
  case NVPTX::BI__imma_m8n8k32_st_c_i32:
  case NVPTX::BI__bmma_m8n8k128_st_c_i32:
  case NVPTX::BI__dmma_m8n8k4_st_c_f64:
  case NVPTX::BI__mma_m16n16k8_st_c_f32: {
    Value *Dst = EmitScalarExpr(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *Ldm = EmitScalarExpr(E->getArg(2));
    std::optional<llvm::APSInt> isColMajorArg =
        E->getArg(3)->getIntegerConstantExpr(getContext());
    if (!isColMajorArg)
      return nullptr;
    bool isColMajor = isColMajorArg->getSExtValue();
    NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
    unsigned IID = isColMajor ? II.IID_col : II.IID_row;
    if (IID == 0)
      return nullptr;

    Function *Intrinsic =
        CGM.getIntrinsic(IID, Dst->getType());
    llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
    SmallVector<Value *, 10> Values = {Dst};
    for (unsigned i = 0; i < II.NumResults; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          Src.getElementType(),
          Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, ParamType));
    }
    Values.push_back(Ldm);
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    return Result;
  }
  // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
  // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
  case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
  case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
  case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
  case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
  case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
  case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
  case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
  case NVPTX::BI__imma_m16n16k16_mma_s8:
  case NVPTX::BI__imma_m16n16k16_mma_u8:
  case NVPTX::BI__imma_m32n8k16_mma_s8:
  case NVPTX::BI__imma_m32n8k16_mma_u8:
  case NVPTX::BI__imma_m8n32k16_mma_s8:
  case NVPTX::BI__imma_m8n32k16_mma_u8:
  case NVPTX::BI__imma_m8n8k32_mma_s4:
  case NVPTX::BI__imma_m8n8k32_mma_u4:
  case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
  case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
  case NVPTX::BI__dmma_m8n8k4_mma_f64:
  case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
  case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
  case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
  case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
    Address Dst = EmitPointerWithAlignment(E->getArg(0));
    Address SrcA = EmitPointerWithAlignment(E->getArg(1));
    Address SrcB = EmitPointerWithAlignment(E->getArg(2));
    Address SrcC = EmitPointerWithAlignment(E->getArg(3));
    std::optional<llvm::APSInt> LayoutArg =
        E->getArg(4)->getIntegerConstantExpr(getContext());
    if (!LayoutArg)
      return nullptr;
    int Layout = LayoutArg->getSExtValue();
    if (Layout < 0 || Layout > 3)
      return nullptr;
    llvm::APSInt SatfArg;
    if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
        BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
      SatfArg = 0;  // .b1 does not have satf argument.
    else if (std::optional<llvm::APSInt> OptSatfArg =
                 E->getArg(5)->getIntegerConstantExpr(getContext()))
      SatfArg = *OptSatfArg;
    else
      return nullptr;
    bool Satf = SatfArg.getSExtValue();
    NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
    unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
    if (IID == 0)  // Unsupported combination of Layout/Satf.
      return nullptr;

    SmallVector<Value *, 24> Values;
    Function *Intrinsic = CGM.getIntrinsic(IID);
    llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
    // Load A
    for (unsigned i = 0; i < MI.NumEltsA; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          SrcA.getElementType(),
          Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, AType));
    }
    // Load B
    llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
    for (unsigned i = 0; i < MI.NumEltsB; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          SrcB.getElementType(),
          Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, BType));
    }
    // Load C
    llvm::Type *CType =
        Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
    for (unsigned i = 0; i < MI.NumEltsC; ++i) {
      Value *V = Builder.CreateAlignedLoad(
          SrcC.getElementType(),
          Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
      Values.push_back(Builder.CreateBitCast(V, CType));
    }
    Value *Result = Builder.CreateCall(Intrinsic, Values);
    llvm::Type *DType = Dst.getElementType();
    for (unsigned i = 0; i < MI.NumEltsD; ++i)
      Builder.CreateAlignedStore(
          Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
          Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
                            llvm::ConstantInt::get(IntTy, i)),
          CharUnits::fromQuantity(4));
    return Result;
  }
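  // Illustrative note (not in the original source): the <layout> argument
  // packs the row/column-major choice for the A and B fragments into the
  // 0..3 range checked above, and <satf> selects the .satfinite intrinsic
  // variant. A call assumed for exposition,
  //   __hmma_m16n16k16_mma_f32f32(d, a, b, c, /*layout=*/0, /*satf=*/0);
  // maps to one entry of the getNVPTXMmaInfo() table via
  // MI.getMMAIntrinsic(Layout, Satf).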
  // The following builtins require half type support
  case NVPTX::BI__nvvm_ex2_approx_f16:
    return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ex2_approx_f16x2:
    return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ff2f16x2_rn:
    return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
    return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ff2f16x2_rz:
    return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
    return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fma_rn_relu_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_sat_f16:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
                        BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmax_nan_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_nan_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmin_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
                        BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmin_nan_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_nan_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
                        E, *this);
  case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
    return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
    return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
                        *this);
  case NVPTX::BI__nvvm_ldg_h:
    return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ldg_h2:
    return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ldu_h:
    return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
  case NVPTX::BI__nvvm_ldu_h2: {
    return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
  }
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
    return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
                       Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
                       4);
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
    return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
                       Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
                       8);
  case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
    return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
                       Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
                       16);
  case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
    return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
                       Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
                       16);
  case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
  case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
  case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
  case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
  case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
  case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
  case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
  case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
  case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
  case NVPTX::BI__nvvm_is_explicit_cluster:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
  case NVPTX::BI__nvvm_isspacep_shared_cluster:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
        EmitScalarExpr(E->getArg(0)));
  case NVPTX::BI__nvvm_mapa:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_mapa),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
  case NVPTX::BI__nvvm_mapa_shared_cluster:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
  case NVPTX::BI__nvvm_getctarank:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
        EmitScalarExpr(E->getArg(0)));
  case NVPTX::BI__nvvm_getctarank_shared_cluster:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
        EmitScalarExpr(E->getArg(0)));
  case NVPTX::BI__nvvm_barrier_cluster_arrive:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
  case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
  case NVPTX::BI__nvvm_barrier_cluster_wait:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
  case NVPTX::BI__nvvm_fence_sc_cluster:
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
  default:
    return nullptr;
  }
}

namespace {
struct BuiltinAlignArgs {
  llvm::Value *Src = nullptr;
  llvm::Type *SrcType = nullptr;
  llvm::Value *Alignment = nullptr;
  llvm::Value *Mask = nullptr;
  llvm::IntegerType *IntType = nullptr;

  BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
    QualType AstType = E->getArg(0)->getType();
    if (AstType->isArrayType())
      Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    else
      Src = CGF.EmitScalarExpr(E->getArg(0));
    SrcType = Src->getType();
    if (SrcType->isPointerTy()) {
      IntType = IntegerType::get(
          CGF.getLLVMContext(),
          CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
    } else {
      assert(SrcType->isIntegerTy());
      IntType = cast<llvm::IntegerType>(SrcType);
    }
    Alignment = CGF.EmitScalarExpr(E->getArg(1));
    Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
    auto *One = llvm::ConstantInt::get(IntType, 1);
    Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
  }
};
} // namespace
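// Illustrative note (not in the original source): for a power-of-two alignment
// argument such as 16, Alignment is zext/truncated to the pointer-width
// integer type and Mask becomes Alignment - 1 == 15 (0b1111), which the
// helpers below use to test or clear the low address bits.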
/// Generate (x & (y-1)) == 0.
RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddress = Args.Src;
  if (Args.SrcType->isPointerTy())
    SrcAddress =
        Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
  return RValue::get(Builder.CreateICmpEQ(
      Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
      llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
}
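// Illustrative sketch (not in the original source) of the IR this produces for
// __builtin_is_aligned(p, 16) on a 64-bit target (value names follow the ones
// used above):
//   %src_addr   = ptrtoint ptr %p to i64
//   %set_bits   = and i64 %src_addr, 15
//   %is_aligned = icmp eq i64 %set_bits, 0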
/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
/// TODO: actually use ptrmask once most optimization passes know about it.
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
  BuiltinAlignArgs Args(E, *this);
  llvm::Value *SrcAddr = Args.Src;
  if (Args.Src->getType()->isPointerTy())
    SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
  llvm::Value *SrcForMask = SrcAddr;
  if (AlignUp) {
    // When aligning up we have to first add the mask to ensure we go over the
    // next alignment value and then align down to the next valid multiple.
    // By adding the mask, we ensure that align_up on an already aligned
    // value will not change the value.
    SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
  }
  // Invert the mask to only clear the lower bits.
  llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
  llvm::Value *Result =
      Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
  if (Args.Src->getType()->isPointerTy()) {
    /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
    // Result = Builder.CreateIntrinsic(
    //   Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
    //   {SrcForMask, NegatedMask}, nullptr, "aligned_result");
    Result->setName("aligned_intptr");
    llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
    // The result must point to the same underlying allocation. This means we
    // can use an inbounds GEP to enable better optimization.
    if (getLangOpts().isSignedOverflowDefined())
      Result =
          Builder.CreateGEP(Int8Ty, Args.Src, Difference, "aligned_result");
    else
      Result = EmitCheckedInBoundsGEP(Int8Ty, Args.Src, Difference,
                                      /*SignedIndices=*/true,
                                      /*isSubtraction=*/!AlignUp,
                                      E->getExprLoc(), "aligned_result");
    // Emit an alignment assumption to ensure that the new alignment is
    // propagated to loads/stores, etc.
    emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
  }
  assert(Result->getType() == Args.SrcType);
  return RValue::get(Result);
}
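// Illustrative sketch (not in the original source) of the IR typically emitted
// for __builtin_align_up(p, 16) on a 64-bit target (value names assumed,
// overflow-check and llvm.assume details omitted):
//   %intptr         = ptrtoint ptr %p to i64
//   %over_boundary  = add i64 %intptr, 15
//   %aligned_intptr = and i64 %over_boundary, -16     ; ~mask
//   %diff           = sub i64 %aligned_intptr, %intptr
//   %aligned_result = getelementptr inbounds i8, ptr %p, i64 %diff
// followed by an alignment assumption on %aligned_result.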
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                                   const CallExpr *E) {
  switch (BuiltinID) {
  case WebAssembly::BI__builtin_wasm_memory_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *I = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
    return Builder.CreateCall(Callee, I);
  }
  case WebAssembly::BI__builtin_wasm_memory_grow: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Args[] = {EmitScalarExpr(E->getArg(0)),
                     EmitScalarExpr(E->getArg(1))};
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
    return Builder.CreateCall(Callee, Args);
  }
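  // Illustrative usage (not in the original source), assuming the usual
  // single-memory WebAssembly setup where the memory index argument is 0:
  //   size_t pages = __builtin_wasm_memory_size(0);        // size in 64 KiB pages
  //   size_t old   = __builtin_wasm_memory_grow(0, delta); // previous size, or
  //                                                        // (size_t)-1 on failure
  // Both lower directly to the wasm_memory_size / wasm_memory_grow intrinsic
  // calls emitted above.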
  case WebAssembly::BI__builtin_wasm_tls_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_align: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_base: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_throw: {
    Value *Tag = EmitScalarExpr(E->getArg(0));
    Value *Obj = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
    return Builder.CreateCall(Callee, {Tag, Obj});
  }
  case WebAssembly::BI__builtin_wasm_rethrow: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Count = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
    return Builder.CreateCall(Callee, {Addr, Count});
  }
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_min_f32:
  case WebAssembly::BI__builtin_wasm_min_f64:
  case WebAssembly::BI__builtin_wasm_min_f32x4:
  case WebAssembly::BI__builtin_wasm_min_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_max_f32:
  case WebAssembly::BI__builtin_wasm_max_f64:
  case WebAssembly::BI__builtin_wasm_max_f32x4:
  case WebAssembly::BI__builtin_wasm_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmin_f32x4:
  case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmax_f32x4:
  case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_ceil_f32x4:
  case WebAssembly::BI__builtin_wasm_floor_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_f32x4:
  case WebAssembly::BI__builtin_wasm_nearest_f32x4:
  case WebAssembly::BI__builtin_wasm_ceil_f64x2:
  case WebAssembly::BI__builtin_wasm_floor_f64x2:
  case WebAssembly::BI__builtin_wasm_trunc_f64x2:
  case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_ceil_f32x4:
    case WebAssembly::BI__builtin_wasm_ceil_f64x2:
      IntNo = Intrinsic::ceil;
      break;
    case WebAssembly::BI__builtin_wasm_floor_f32x4:
    case WebAssembly::BI__builtin_wasm_floor_f64x2:
      IntNo = Intrinsic::floor;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_f32x4:
    case WebAssembly::BI__builtin_wasm_trunc_f64x2:
      IntNo = Intrinsic::trunc;
      break;
    case WebAssembly::BI__builtin_wasm_nearest_f32x4:
    case WebAssembly::BI__builtin_wasm_nearest_f64x2:
      IntNo = Intrinsic::nearbyint;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Value = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Value);
  }
  case WebAssembly::BI__builtin_wasm_ref_null_extern: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_extern);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_ref_null_func: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
  case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
  case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
  case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
    case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
      IntNo = Intrinsic::sadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
    case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
      IntNo = Intrinsic::uadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
      IntNo = Intrinsic::wasm_sub_sat_signed;
      break;
    case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
      IntNo = Intrinsic::wasm_sub_sat_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_abs_i8x16:
  case WebAssembly::BI__builtin_wasm_abs_i16x8:
  case WebAssembly::BI__builtin_wasm_abs_i32x4:
  case WebAssembly::BI__builtin_wasm_abs_i64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Neg = Builder.CreateNeg(Vec, "neg");
    Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
    Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
    return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
  }
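  // Illustrative sketch (not in the original source): for
  // __builtin_wasm_abs_i32x4 the select-based lowering above corresponds to
  //   %neg     = sub <4 x i32> zeroinitializer, %vec
  //   %abscond = icmp slt <4 x i32> %vec, zeroinitializer
  //   %abs     = select <4 x i1> %abscond, <4 x i32> %neg, <4 x i32> %vec
  // which the WebAssembly backend is expected to pattern-match to i32x4.abs.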
  case WebAssembly::BI__builtin_wasm_min_s_i8x16:
  case WebAssembly::BI__builtin_wasm_min_u_i8x16:
  case WebAssembly::BI__builtin_wasm_max_s_i8x16:
  case WebAssembly::BI__builtin_wasm_max_u_i8x16:
  case WebAssembly::BI__builtin_wasm_min_s_i16x8:
  case WebAssembly::BI__builtin_wasm_min_u_i16x8:
  case WebAssembly::BI__builtin_wasm_max_s_i16x8:
  case WebAssembly::BI__builtin_wasm_max_u_i16x8:
  case WebAssembly::BI__builtin_wasm_min_s_i32x4:
  case WebAssembly::BI__builtin_wasm_min_u_i32x4:
  case WebAssembly::BI__builtin_wasm_max_s_i32x4:
  case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Value *ICmp;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_min_s_i8x16:
    case WebAssembly::BI__builtin_wasm_min_s_i16x8:
    case WebAssembly::BI__builtin_wasm_min_s_i32x4:
      ICmp = Builder.CreateICmpSLT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_min_u_i8x16:
    case WebAssembly::BI__builtin_wasm_min_u_i16x8:
    case WebAssembly::BI__builtin_wasm_min_u_i32x4:
      ICmp = Builder.CreateICmpULT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_s_i8x16:
    case WebAssembly::BI__builtin_wasm_max_s_i16x8:
    case WebAssembly::BI__builtin_wasm_max_s_i32x4:
      ICmp = Builder.CreateICmpSGT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_u_i8x16:
    case WebAssembly::BI__builtin_wasm_max_u_i16x8:
    case WebAssembly::BI__builtin_wasm_max_u_i32x4:
      ICmp = Builder.CreateICmpUGT(LHS, RHS);
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    return Builder.CreateSelect(ICmp, LHS, RHS);
  }
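  // Illustrative sketch (not in the original source):
  // __builtin_wasm_min_s_i32x4 becomes a compare-and-select pair, roughly
  //   %cmp = icmp slt <4 x i32> %lhs, %rhs
  //   %min = select <4 x i1> %cmp, <4 x i32> %lhs, <4 x i32> %rhs
  // with the predicate (slt/ult/sgt/ugt) chosen per builtin as above.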
  case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
  case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_signed;
      break;
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }

    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Vec);
  }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
  case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_any_true_v128:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_v128:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
  case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
  case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
  case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
  case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
    Value *Low = EmitScalarExpr(E->getArg(0));
    Value *High = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_signed;
      break;
    case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
    return Builder.CreateCall(Callee, {Low, High});
  }
  case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
  case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
      IntNo = Intrinsic::fptosi_sat;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4:
      IntNo = Intrinsic::fptoui_sat;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    llvm::Type *SrcT = Vec->getType();
    llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
    Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
    Value *Trunc = Builder.CreateCall(Callee, Vec);
    Value *Splat = Constant::getNullValue(TruncT);
    return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
  }
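  // Illustrative note (not in the original source): the saturating conversion
  // above yields a <2 x i32>; shuffling it with a zero vector under the mask
  // {0, 1, 2, 3} widens it to a <4 x i32> whose upper two lanes are zero, which
  // is what the "zero" trunc_sat forms require. Roughly:
  //   %trunc = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> %v)
  //   %res   = shufflevector <2 x i32> %trunc, <2 x i32> zeroinitializer,
  //                          <4 x i32> <i32 0, i32 1, i32 2, i32 3>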
  case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
    Value *Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
    while (OpIdx < 18) {
      std::optional<llvm::APSInt> LaneConst =
          E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
      assert(LaneConst && "Constant arg isn't actually constant?");
      Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
    }
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
    return Builder.CreateCall(Callee, Ops);
  }
  case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
  case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
      IntNo = Intrinsic::wasm_relaxed_madd;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2:
      IntNo = Intrinsic::wasm_relaxed_nmadd;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i8x16:
  case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i16x8:
  case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_relaxed_laneselect, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
  case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
      IntNo = Intrinsic::wasm_relaxed_min;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
      IntNo = Intrinsic::wasm_relaxed_max;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
      IntNo = Intrinsic::wasm_relaxed_trunc_signed;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
      IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
      IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2:
      IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo);
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Value *Acc = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed);
    return Builder.CreateCall(Callee, {LHS, RHS, Acc});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Value *Acc = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
    return Builder.CreateCall(Callee, {LHS, RHS, Acc});
  }
  case WebAssembly::BI__builtin_wasm_table_get: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Value *Index = EmitScalarExpr(E->getArg(1));
    Function *Callee;
    if (E->getType().isWebAssemblyExternrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_externref);
    else if (E->getType().isWebAssemblyFuncrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_funcref);
    else
      llvm_unreachable(
          "Unexpected reference type for __builtin_wasm_table_get");
    return Builder.CreateCall(Callee, {Table, Index});
  }
  case WebAssembly::BI__builtin_wasm_table_set: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Value *Index = EmitScalarExpr(E->getArg(1));
    Value *Val = EmitScalarExpr(E->getArg(2));
    Function *Callee;
    if (E->getArg(2)->getType().isWebAssemblyExternrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_externref);
    else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_funcref);
    else
      llvm_unreachable(
          "Unexpected reference type for __builtin_wasm_table_set");
    return Builder.CreateCall(Callee, {Table, Index, Val});
  }
  case WebAssembly::BI__builtin_wasm_table_size: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
    return Builder.CreateCall(Callee, Value);
  }
  case WebAssembly::BI__builtin_wasm_table_grow: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Value *Val = EmitScalarExpr(E->getArg(1));
    Value *NElems = EmitScalarExpr(E->getArg(2));

    Function *Callee;
    if (E->getArg(1)->getType().isWebAssemblyExternrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_grow_externref);
    else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
    else
      llvm_unreachable(
          "Unexpected reference type for __builtin_wasm_table_grow");

    return Builder.CreateCall(Callee, {Table, Val, NElems});
  }
  case WebAssembly::BI__builtin_wasm_table_fill: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Value *Index = EmitScalarExpr(E->getArg(1));
    Value *Val = EmitScalarExpr(E->getArg(2));
    Value *NElems = EmitScalarExpr(E->getArg(3));

    Function *Callee;
    if (E->getArg(2)->getType().isWebAssemblyExternrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_externref);
    else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
      Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
    else
      llvm_unreachable(
          "Unexpected reference type for __builtin_wasm_table_fill");

    return Builder.CreateCall(Callee, {Table, Index, Val, NElems});
  }
  case WebAssembly::BI__builtin_wasm_table_copy: {
    assert(E->getArg(0)->getType()->isArrayType());
    Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
    Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
    Value *DstIdx = EmitScalarExpr(E->getArg(2));
    Value *SrcIdx = EmitScalarExpr(E->getArg(3));
    Value *NElems = EmitScalarExpr(E->getArg(4));

    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_copy);

    return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});
  }
  default:
    return nullptr;
  }
}
20341 static std::pair
<Intrinsic::ID
, unsigned>
20342 getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID
) {
20344 unsigned BuiltinID
;
20345 Intrinsic::ID IntrinsicID
;
20348 static Info Infos
[] = {
#define CUSTOM_BUILTIN_MAPPING(x,s) \
  { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
    // Legacy builtins that take a vector in place of a vector predicate.
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
  };
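
  // The lookup below relies on the table being sorted by BuiltinID; it is
  // sorted once, on first use, so llvm::lower_bound can binary-search it.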
  auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
  static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
  (void)SortOnce;

  const Info *F = llvm::lower_bound(Infos, Info{BuiltinID, 0, 0}, CmpInfo);
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return {Intrinsic::not_intrinsic, 0};

  return {F->IntrinsicID, F->VecLen};
}
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  Intrinsic::ID ID;
  unsigned VecLen;
  std::tie(ID, VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID);
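
  // Circular-addressing (_pci/_pcr) loads and stores share one lowering,
  // sketched below. Illustratively (argument names are for exposition only),
  // a circular load such as
  //   Val = __builtin_HEXAGON_L2_loadri_pci(&Base, Inc, Mod, Start);
  // calls the matching intrinsic, which returns {Value, NewBase}; NewBase is
  // written back through the first argument and Value becomes the result.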
  auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address A = EmitPointerWithAlignment(E->getArg(0));
    Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // The treatment of both loads and stores is the same: the arguments for
    // the builtin are the same as the arguments for the intrinsic.
    // Load:
    //   builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
    //   builtin(Base, Mod, Start)      -> intr(Base, Mod, Start)
    // Store:
    //   builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
    //   builtin(Base, Mod, Val, Start)      -> intr(Base, Mod, Val, Start)
    SmallVector<llvm::Value*,5> Ops = { Base };
    for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The load intrinsics generate two results (Value, NewBase), stores
    // generate one (NewBase). The new base address needs to be stored.
    llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
                                  : Result;
    llvm::Value *LV = EmitScalarExpr(E->getArg(0));
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    llvm::Value *RetVal =
        Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
    if (IsLoad)
      RetVal = Builder.CreateExtractValue(Result, 0);
    return RetVal;
  };
  // Handle the conversion of bit-reverse load intrinsics to bit code.
  // The intrinsic call after this function only reads from memory and the
  // write to memory is dealt by the store instruction.
  auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be returned. The result of the load instruction is
    // passed to intrinsic by address, so the value needs to be stored.
    llvm::Value *BaseAddress = EmitScalarExpr(E->getArg(0));

    // Expressions like &(*pt++) will be incremented per evaluation.
    // EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
    // per call.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // Operands are Base, Dest, Modifier.
    // The intrinsic format in LLVM IR is defined as
    // { ValueType, i8* } (i8*, i32).
    llvm::Value *Result = Builder.CreateCall(
        CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});

    // The value needs to be stored as the variable is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The store needs to be truncated to fit the destination type.
    // While i32 and i64 are natively supported on Hexagon, i8 and i16 needs
    // to be handled with stores of respective destination type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    Builder.CreateAlignedStore(DestVal, DestAddress, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };
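
  // HVX vector predicates are not exposed directly by the C-level builtins,
  // so masks travel as ordinary vectors: V2Q converts a vector into a
  // predicate via vandvrt, Q2V converts a predicate back via vandqrt, and
  // VecLen picks the 64-byte or 128-byte HVX variant.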
  auto V2Q = [this, VecLen] (llvm::Value *Vec) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
                                     : Intrinsic::hexagon_V6_vandvrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Vec, Builder.getInt32(-1)});
  };
  auto Q2V = [this, VecLen] (llvm::Value *Pred) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
                                     : Intrinsic::hexagon_V6_vandqrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Pred, Builder.getInt32(-1)});
  };
  switch (BuiltinID) {
  // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
  // and the corresponding C/C++ builtins use loads/stores to update
  // the predicate.
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    // Get the type from the 0-th argument.
    llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
    Address PredAddr =
        EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
    llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});

    llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
    Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
        PredAddr.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  // These are identical to the builtins above, except they don't consume
  // input carry, only generate carry-out. Since they still produce two
  // outputs, generate the store of the predicate, but no load.
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: {
    // Get the type from the 0-th argument.
    llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
    Address PredAddr =
        EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});

    llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
    Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
        PredAddr.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
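
  // The legacy HVX masked-store builtins take a plain vector where the
  // intrinsic expects a vector predicate, so the first operand is converted
  // with V2Q before the call.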
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
    SmallVector<llvm::Value*,4> Ops;
    const Expr *PredOp = E->getArg(0);
    // There will be an implicit cast to a boolean vector. Strip it.
    if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
      if (Cast->getCastKind() == CK_BitCast)
        PredOp = Cast->getSubExpr();
      Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
    }
    for (int i = 1, e = E->getNumArgs(); i != e; ++i)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
  }
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/false);
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
  }

  return nullptr;
}
Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E,
                                             ReturnValueSlot ReturnValue) {
  SmallVector<Value *, 4> Ops;
  llvm::Type *ResultType = ConvertType(E->getType());
  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error == ASTContext::GE_Missing_type) {
    // Vector intrinsics don't have a type string.
    assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
           BuiltinID <= clang::RISCV::LastRVVBuiltin);
    ICEArguments = 0;
    if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
        BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
      ICEArguments = 1 << 1;
  } else {
    assert(Error == ASTContext::GE_None && "Unexpected error");
  }

  if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
    ICEArguments |= (1 << 1);
  if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
    ICEArguments |= (1 << 2);
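
  // Emit the arguments. Three shapes are handled: RVV tuple (aggregate)
  // arguments are loaded whole, arguments flagged in ICEArguments are folded
  // to a ConstantInt so the intrinsic receives an immediate, and everything
  // else is emitted as an ordinary scalar.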
  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // Handle aggregate argument, namely RVV tuple types in segment load/store
    if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
      LValue L = EmitAggExprToLValue(E->getArg(i));
      llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
      Ops.push_back(AggValue);
      continue;
    }

    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    Ops.push_back(llvm::ConstantInt::get(
        getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
  }
  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  unsigned NF = 1;
  // The 0th bit simulates the `vta` of RVV
  // The 1st bit simulates the `vma` of RVV
  constexpr unsigned RVV_VTA = 0x1;
  constexpr unsigned RVV_VMA = 0x2;
  int PolicyAttrs = 0;
  bool IsMasked = false;

  // Required for overloaded intrinsics.
  llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
  switch (BuiltinID) {
  default: llvm_unreachable("unexpected builtin ID");
  case RISCV::BI__builtin_riscv_orc_b_32:
  case RISCV::BI__builtin_riscv_orc_b_64:
  case RISCV::BI__builtin_riscv_clz_32:
  case RISCV::BI__builtin_riscv_clz_64:
  case RISCV::BI__builtin_riscv_ctz_32:
  case RISCV::BI__builtin_riscv_ctz_64:
  case RISCV::BI__builtin_riscv_clmul_32:
  case RISCV::BI__builtin_riscv_clmul_64:
  case RISCV::BI__builtin_riscv_clmulh_32:
  case RISCV::BI__builtin_riscv_clmulh_64:
  case RISCV::BI__builtin_riscv_clmulr_32:
  case RISCV::BI__builtin_riscv_clmulr_64:
  case RISCV::BI__builtin_riscv_xperm4_32:
  case RISCV::BI__builtin_riscv_xperm4_64:
  case RISCV::BI__builtin_riscv_xperm8_32:
  case RISCV::BI__builtin_riscv_xperm8_64:
  case RISCV::BI__builtin_riscv_brev8_32:
  case RISCV::BI__builtin_riscv_brev8_64:
  case RISCV::BI__builtin_riscv_zip_32:
  case RISCV::BI__builtin_riscv_unzip_32: {
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin ID");
    // Zbb
    case RISCV::BI__builtin_riscv_orc_b_32:
    case RISCV::BI__builtin_riscv_orc_b_64:
      ID = Intrinsic::riscv_orc_b;
      break;
    case RISCV::BI__builtin_riscv_clz_32:
    case RISCV::BI__builtin_riscv_clz_64: {
      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
      Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
      if (Result->getType() != ResultType)
        Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                       "cast");
      return Result;
    }
    case RISCV::BI__builtin_riscv_ctz_32:
    case RISCV::BI__builtin_riscv_ctz_64: {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
      Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
      if (Result->getType() != ResultType)
        Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                       "cast");
      return Result;
    }

    // Zbc
    case RISCV::BI__builtin_riscv_clmul_32:
    case RISCV::BI__builtin_riscv_clmul_64:
      ID = Intrinsic::riscv_clmul;
      break;
    case RISCV::BI__builtin_riscv_clmulh_32:
    case RISCV::BI__builtin_riscv_clmulh_64:
      ID = Intrinsic::riscv_clmulh;
      break;
    case RISCV::BI__builtin_riscv_clmulr_32:
    case RISCV::BI__builtin_riscv_clmulr_64:
      ID = Intrinsic::riscv_clmulr;
      break;

    // Zbkx
    case RISCV::BI__builtin_riscv_xperm8_32:
    case RISCV::BI__builtin_riscv_xperm8_64:
      ID = Intrinsic::riscv_xperm8;
      break;
    case RISCV::BI__builtin_riscv_xperm4_32:
    case RISCV::BI__builtin_riscv_xperm4_64:
      ID = Intrinsic::riscv_xperm4;
      break;

    // Zbkb
    case RISCV::BI__builtin_riscv_brev8_32:
    case RISCV::BI__builtin_riscv_brev8_64:
      ID = Intrinsic::riscv_brev8;
      break;
    case RISCV::BI__builtin_riscv_zip_32:
      ID = Intrinsic::riscv_zip;
      break;
    case RISCV::BI__builtin_riscv_unzip_32:
      ID = Intrinsic::riscv_unzip;
      break;
    }

    IntrinsicTypes = {ResultType};
    break;
  }

  // Zk builtins

  // Zknh
  case RISCV::BI__builtin_riscv_sha256sig0:
    ID = Intrinsic::riscv_sha256sig0;
    break;
  case RISCV::BI__builtin_riscv_sha256sig1:
    ID = Intrinsic::riscv_sha256sig1;
    break;
  case RISCV::BI__builtin_riscv_sha256sum0:
    ID = Intrinsic::riscv_sha256sum0;
    break;
  case RISCV::BI__builtin_riscv_sha256sum1:
    ID = Intrinsic::riscv_sha256sum1;
    break;

  // Zksed
  case RISCV::BI__builtin_riscv_sm4ks:
    ID = Intrinsic::riscv_sm4ks;
    break;
  case RISCV::BI__builtin_riscv_sm4ed:
    ID = Intrinsic::riscv_sm4ed;
    break;

  // Zksh
  case RISCV::BI__builtin_riscv_sm3p0:
    ID = Intrinsic::riscv_sm3p0;
    break;
  case RISCV::BI__builtin_riscv_sm3p1:
    ID = Intrinsic::riscv_sm3p1;
    break;

  // Zihintntl
: {
20767 llvm::Type
*ResTy
= ConvertType(E
->getType());
20768 unsigned DomainVal
= 5; // Default __RISCV_NTLH_ALL
20769 if (Ops
.size() == 2)
20770 DomainVal
= cast
<ConstantInt
>(Ops
[1])->getZExtValue();
20772 llvm::MDNode
*RISCVDomainNode
= llvm::MDNode::get(
20774 llvm::ConstantAsMetadata::get(Builder
.getInt32(DomainVal
)));
20775 llvm::MDNode
*NontemporalNode
= llvm::MDNode::get(
20776 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder
.getInt32(1)));
20779 if(ResTy
->isScalableTy()) {
20780 const ScalableVectorType
*SVTy
= cast
<ScalableVectorType
>(ResTy
);
20781 llvm::Type
*ScalarTy
= ResTy
->getScalarType();
20782 Width
= ScalarTy
->getPrimitiveSizeInBits() *
20783 SVTy
->getElementCount().getKnownMinValue();
20785 Width
= ResTy
->getPrimitiveSizeInBits();
20786 LoadInst
*Load
= Builder
.CreateLoad(
20787 Address(Ops
[0], ResTy
, CharUnits::fromQuantity(Width
/ 8)));
20789 Load
->setMetadata(llvm::LLVMContext::MD_nontemporal
, NontemporalNode
);
20790 Load
->setMetadata(CGM
.getModule().getMDKindID("riscv-nontemporal-domain"),
  case RISCV::BI__builtin_riscv_ntl_store: {
    unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
    if (Ops.size() == 3)
      DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();

    llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
        getLLVMContext(),
        llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
    llvm::MDNode *NontemporalNode = llvm::MDNode::get(
        getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));

    StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
    Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
    Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
                       RISCVDomainNode);

    return Store;
  }

  // Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
  // SiFive Vector builtins are handled from here.
#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
  }
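
  // Common exit for the cases above that only set ID and IntrinsicTypes:
  // resolve the (possibly overloaded) intrinsic and forward the collected
  // operands.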
  assert(ID != Intrinsic::not_intrinsic);

  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops, "");