//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
13 #include "ABIInfo.h"
14 #include "CGCUDARuntime.h"
15 #include "CGCXXABI.h"
16 #include "CGObjCRuntime.h"
17 #include "CGOpenCLRuntime.h"
18 #include "CGRecordLayout.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "ConstantEmitter.h"
22 #include "PatternInit.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/OSLog.h"
28 #include "clang/Basic/TargetBuiltins.h"
29 #include "clang/Basic/TargetInfo.h"
30 #include "clang/CodeGen/CGFunctionInfo.h"
31 #include "clang/Frontend/FrontendDiagnostic.h"
32 #include "llvm/ADT/APFloat.h"
33 #include "llvm/ADT/APInt.h"
34 #include "llvm/ADT/FloatingPointMode.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/InlineAsm.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/IR/IntrinsicsAArch64.h"
42 #include "llvm/IR/IntrinsicsAMDGPU.h"
43 #include "llvm/IR/IntrinsicsARM.h"
44 #include "llvm/IR/IntrinsicsBPF.h"
45 #include "llvm/IR/IntrinsicsHexagon.h"
46 #include "llvm/IR/IntrinsicsNVPTX.h"
47 #include "llvm/IR/IntrinsicsPowerPC.h"
48 #include "llvm/IR/IntrinsicsR600.h"
49 #include "llvm/IR/IntrinsicsRISCV.h"
50 #include "llvm/IR/IntrinsicsS390.h"
51 #include "llvm/IR/IntrinsicsVE.h"
52 #include "llvm/IR/IntrinsicsWebAssembly.h"
53 #include "llvm/IR/IntrinsicsX86.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/MatrixBuilder.h"
56 #include "llvm/Support/ConvertUTF.h"
57 #include "llvm/Support/MathExtras.h"
58 #include "llvm/Support/ScopedPrinter.h"
59 #include "llvm/TargetParser/AArch64TargetParser.h"
60 #include "llvm/TargetParser/X86TargetParser.h"
61 #include <optional>
62 #include <sstream>
64 using namespace clang;
65 using namespace CodeGen;
66 using namespace llvm;

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
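
// Illustrative note (call sites are outside this excerpt, so this is an
// assumption about how the helper is used): the alloca-family builtins call
// this so that under -ftrivial-auto-var-init=zero or =pattern the freshly
// allocated bytes are filled by the memset emitted above, which is tagged
// with "auto-init" metadata.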

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // once the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.find(BuiltinID) != F128Builtins.end())
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.find(BuiltinID) !=
                 AIXLongDouble64Builtins.end())
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
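
// Example: for Builtin::BI__builtin_fabsf the default path above strips the
// "__builtin_" prefix (10 characters, hence substr(10)) and returns a
// declaration of "fabsf" with the builtin's own function type.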

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

static llvm::Value *CheckAtomicAlignment(CodeGenFunction &CGF,
                                         const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
                       : Ptr.getElementType()->getScalarSizeInBits() / 8;
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
  }
  return Ptr.getPointer();
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Args[2];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
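
// Example (illustrative): with Kind == AtomicRMWInst::Add and the default
// ordering, a call such as `__sync_fetch_and_add(&i, 5)` on an int becomes
//   %old = atomicrmw add ptr %i, i32 5 seq_cst
// and %old is converted back to the source type via EmitFromInt.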

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = DestPtr;

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
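
// Example (illustrative): the *_and_fetch builtins use this path, e.g.
// `__sync_add_and_fetch(&i, 5)` emits the atomicrmw and then re-applies the
// operation (old + 5) so the *new* value is returned; Invert handles the nand
// variants, which must return ~(old & val).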

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange*
/// intrinsics, invoke the function EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Args[3];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
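
// Example (illustrative): `__sync_bool_compare_and_swap` goes through this
// helper with ReturnBool == true (the zext'ed success bit is the result),
// while `__sync_val_compare_and_swap` uses ReturnBool == false and returns
// the old value extracted from the cmpxchg pair.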

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
                   Destination, Comparand, Exchange,
                   SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
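
// Example: `_InterlockedCompareExchange(&x, Exchange, Comparand)` becomes
//   %pair = cmpxchg volatile ptr %x, i32 %Comparand, i32 %Exchange seq_cst seq_cst
// and element 0 of %pair (the old value of *Destination) is returned, matching
// the MSVC contract.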

// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));

  assert(Destination->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());
  assert(ComparandPtr->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address ComparandResult(ComparandPtr, Int128Ty,
                          CGF.getContext().toCharUnitsFromBits(128));

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandResult);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Add,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Sub,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
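
// Note: atomicrmw yields the value the memory held *before* the operation, so
// the trailing add/sub of 1 converts that old value into the new value, which
// is what the _InterlockedIncrement/_InterlockedDecrement family returns.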

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                 const CallExpr *E,
                                                 unsigned IntrinsicID,
                                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}
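
// Example (illustrative): `__builtin_sqrt(x)` uses this helper with
// Intrinsic::sqrt, producing `call double @llvm.sqrt.f64(double %x)`; in a
// strict-FP context the constrained variant
// `@llvm.experimental.constrained.sqrt.f64` is emitted instead, and the
// builder supplies its rounding-mode and exception-behavior operands.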

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                  const CallExpr *E,
                                                  unsigned IntrinsicID,
                                                  unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Has second type mangled argument.
static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
    CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
    llvm::Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                   const CallExpr *E,
                                                   unsigned IntrinsicID,
                                                   unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {
  Function *F;
  if (CGF.Builder.getIsFPConstrained())
    F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
  else
    F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);

  if (CGF.Builder.getIsFPConstrained())
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  else
    return CGF.Builder.CreateCall(F, Args);
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               unsigned IntrinsicID,
                               llvm::StringRef Name = "") {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0, Name);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               llvm::Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}
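
// Example: `__builtin_frexp(x, &e)` maps to `llvm.frexp.f64.i32`, which
// returns a {fraction, exponent} pair; element 1 is stored through the
// exponent pointer and element 0 becomes the call's result.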

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
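
// Example: with IntrinsicID == llvm::Intrinsic::sadd_with_overflow and i32
// operands this emits
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
// returning element 0 as the sum and element 1 through Carry.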

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  Call->setMetadata(llvm::LLVMContext::MD_noundef,
                    llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
  return Call;
}

namespace {
struct WidthAndSignedness {
  unsigned Width;
  bool Signed;
};
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType()  ? 1
                   : Type->isBitIntType() ? context.getIntWidth(Type)
                                          : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
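
// Worked example: for {width 32, signed} and {width 32, unsigned} the result
// is signed, and the unsigned member needs 32 + 1 bits to stay representable,
// so the encompassing type is a 33-bit signed integer.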

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
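
// Example: on a 64-bit target, `__builtin_object_size(p, 2)` (with no
// pass_object_size parameter involved) lowers to
//   call i64 @llvm.objectsize.i64.p0(ptr %p, i1 true, i1 true, i1 false)
// i.e. min = true, null-is-unknown = true, dynamic = false.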

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly: return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset: return 'r';
  case BitTest::Set: return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
  std::string_view MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {PtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
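
// Example: `_interlockedbittestandset64` yields the inline asm
// "lock btsq $2, ($1)"; the "={@ccc}" output constraint returns the carry
// flag, which holds the prior value of the tested bit.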

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
  case BitTest::Release: return llvm::AtomicOrdering::Release;
  case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CGF.Int8Ty, CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
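
// Worked example (generic path): for `_bittestandset(bits, 40)` the byte index
// is 40 >> 3 == 5 and the bit-in-byte position is 40 & 0x7 == 0, so byte 5 is
// or'ed with mask 1 and the bit's previous value is returned as 0 or 1.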

static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
                                                unsigned BuiltinID,
                                                const CallExpr *E) {
  Value *Addr = CGF.EmitScalarExpr(E->getArg(0));

  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  llvm::IntegerType *RetType = CGF.Int32Ty;

  switch (BuiltinID) {
  case clang::PPC::BI__builtin_ppc_ldarx:
    AsmOS << "ldarx ";
    RetType = CGF.Int64Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lwarx:
    AsmOS << "lwarx ";
    RetType = CGF.Int32Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lharx:
    AsmOS << "lharx ";
    RetType = CGF.Int16Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lbarx:
    AsmOS << "lbarx ";
    RetType = CGF.Int8Ty;
    break;
  default:
    llvm_unreachable("Expected only PowerPC load reserve intrinsics");
  }

  AsmOS << "$0, ${1:y}";

  std::string Constraints = "=r,*Z,~{memory}";
  std::string_view MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *PtrType = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
  CI->addParamAttr(
      0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
  return CI;
}

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}

// Many of the MSVC builtins are on x64, ARM and AArch64; to avoid repeating
// code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedCompareExchange128,
  _InterlockedCompareExchange128_acq,
  _InterlockedCompareExchange128_rel,
  _InterlockedCompareExchange128_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};

static std::optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return std::nullopt;
  case clang::ARM::BI_BitScanForward:
  case clang::ARM::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::ARM::BI_BitScanReverse:
  case clang::ARM::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::ARM::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::ARM::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::ARM::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::ARM::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::ARM::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::ARM::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::ARM::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::ARM::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case clang::ARM::BI_InterlockedExchangeAdd8_acq:
  case clang::ARM::BI_InterlockedExchangeAdd16_acq:
  case clang::ARM::BI_InterlockedExchangeAdd_acq:
  case clang::ARM::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case clang::ARM::BI_InterlockedExchangeAdd8_rel:
  case clang::ARM::BI_InterlockedExchangeAdd16_rel:
  case clang::ARM::BI_InterlockedExchangeAdd_rel:
  case clang::ARM::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case clang::ARM::BI_InterlockedExchangeAdd8_nf:
  case clang::ARM::BI_InterlockedExchangeAdd16_nf:
  case clang::ARM::BI_InterlockedExchangeAdd_nf:
  case clang::ARM::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case clang::ARM::BI_InterlockedExchange8_acq:
  case clang::ARM::BI_InterlockedExchange16_acq:
  case clang::ARM::BI_InterlockedExchange_acq:
  case clang::ARM::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case clang::ARM::BI_InterlockedExchange8_rel:
  case clang::ARM::BI_InterlockedExchange16_rel:
  case clang::ARM::BI_InterlockedExchange_rel:
  case clang::ARM::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case clang::ARM::BI_InterlockedExchange8_nf:
  case clang::ARM::BI_InterlockedExchange16_nf:
  case clang::ARM::BI_InterlockedExchange_nf:
  case clang::ARM::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case clang::ARM::BI_InterlockedCompareExchange8_acq:
  case clang::ARM::BI_InterlockedCompareExchange16_acq:
  case clang::ARM::BI_InterlockedCompareExchange_acq:
  case clang::ARM::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case clang::ARM::BI_InterlockedCompareExchange8_rel:
  case clang::ARM::BI_InterlockedCompareExchange16_rel:
  case clang::ARM::BI_InterlockedCompareExchange_rel:
  case clang::ARM::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case clang::ARM::BI_InterlockedCompareExchange8_nf:
  case clang::ARM::BI_InterlockedCompareExchange16_nf:
  case clang::ARM::BI_InterlockedCompareExchange_nf:
  case clang::ARM::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case clang::ARM::BI_InterlockedOr8_acq:
  case clang::ARM::BI_InterlockedOr16_acq:
  case clang::ARM::BI_InterlockedOr_acq:
  case clang::ARM::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case clang::ARM::BI_InterlockedOr8_rel:
  case clang::ARM::BI_InterlockedOr16_rel:
  case clang::ARM::BI_InterlockedOr_rel:
  case clang::ARM::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case clang::ARM::BI_InterlockedOr8_nf:
  case clang::ARM::BI_InterlockedOr16_nf:
  case clang::ARM::BI_InterlockedOr_nf:
  case clang::ARM::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case clang::ARM::BI_InterlockedXor8_acq:
  case clang::ARM::BI_InterlockedXor16_acq:
  case clang::ARM::BI_InterlockedXor_acq:
  case clang::ARM::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case clang::ARM::BI_InterlockedXor8_rel:
  case clang::ARM::BI_InterlockedXor16_rel:
  case clang::ARM::BI_InterlockedXor_rel:
  case clang::ARM::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case clang::ARM::BI_InterlockedXor8_nf:
  case clang::ARM::BI_InterlockedXor16_nf:
  case clang::ARM::BI_InterlockedXor_nf:
  case clang::ARM::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case clang::ARM::BI_InterlockedAnd8_acq:
  case clang::ARM::BI_InterlockedAnd16_acq:
  case clang::ARM::BI_InterlockedAnd_acq:
  case clang::ARM::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case clang::ARM::BI_InterlockedAnd8_rel:
  case clang::ARM::BI_InterlockedAnd16_rel:
  case clang::ARM::BI_InterlockedAnd_rel:
  case clang::ARM::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case clang::ARM::BI_InterlockedAnd8_nf:
  case clang::ARM::BI_InterlockedAnd16_nf:
  case clang::ARM::BI_InterlockedAnd_nf:
  case clang::ARM::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case clang::ARM::BI_InterlockedIncrement16_acq:
  case clang::ARM::BI_InterlockedIncrement_acq:
  case clang::ARM::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case clang::ARM::BI_InterlockedIncrement16_rel:
  case clang::ARM::BI_InterlockedIncrement_rel:
  case clang::ARM::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case clang::ARM::BI_InterlockedIncrement16_nf:
  case clang::ARM::BI_InterlockedIncrement_nf:
  case clang::ARM::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case clang::ARM::BI_InterlockedDecrement16_acq:
  case clang::ARM::BI_InterlockedDecrement_acq:
  case clang::ARM::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case clang::ARM::BI_InterlockedDecrement16_rel:
  case clang::ARM::BI_InterlockedDecrement_rel:
  case clang::ARM::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case clang::ARM::BI_InterlockedDecrement16_nf:
  case clang::ARM::BI_InterlockedDecrement_nf:
  case clang::ARM::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}

static std::optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return std::nullopt;
  case clang::AArch64::BI_BitScanForward:
  case clang::AArch64::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case clang::AArch64::BI_BitScanReverse:
  case clang::AArch64::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case clang::AArch64::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case clang::AArch64::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case clang::AArch64::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case clang::AArch64::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case clang::AArch64::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case clang::AArch64::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case clang::AArch64::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case clang::AArch64::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case clang::AArch64::BI_InterlockedExchangeAdd8_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd16_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd_acq:
  case clang::AArch64::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case clang::AArch64::BI_InterlockedExchangeAdd8_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd16_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd_rel:
  case clang::AArch64::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case clang::AArch64::BI_InterlockedExchangeAdd8_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd16_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd_nf:
  case clang::AArch64::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case clang::AArch64::BI_InterlockedExchange8_acq:
  case clang::AArch64::BI_InterlockedExchange16_acq:
  case clang::AArch64::BI_InterlockedExchange_acq:
  case clang::AArch64::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case clang::AArch64::BI_InterlockedExchange8_rel:
  case clang::AArch64::BI_InterlockedExchange16_rel:
  case clang::AArch64::BI_InterlockedExchange_rel:
  case clang::AArch64::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case clang::AArch64::BI_InterlockedExchange8_nf:
  case clang::AArch64::BI_InterlockedExchange16_nf:
  case clang::AArch64::BI_InterlockedExchange_nf:
  case clang::AArch64::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case clang::AArch64::BI_InterlockedCompareExchange8_acq:
  case clang::AArch64::BI_InterlockedCompareExchange16_acq:
  case clang::AArch64::BI_InterlockedCompareExchange_acq:
  case clang::AArch64::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case clang::AArch64::BI_InterlockedCompareExchange8_rel:
  case clang::AArch64::BI_InterlockedCompareExchange16_rel:
  case clang::AArch64::BI_InterlockedCompareExchange_rel:
  case clang::AArch64::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case clang::AArch64::BI_InterlockedCompareExchange8_nf:
  case clang::AArch64::BI_InterlockedCompareExchange16_nf:
  case clang::AArch64::BI_InterlockedCompareExchange_nf:
  case clang::AArch64::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case clang::AArch64::BI_InterlockedCompareExchange128:
    return MSVCIntrin::_InterlockedCompareExchange128;
  case clang::AArch64::BI_InterlockedCompareExchange128_acq:
    return MSVCIntrin::_InterlockedCompareExchange128_acq;
  case clang::AArch64::BI_InterlockedCompareExchange128_nf:
    return MSVCIntrin::_InterlockedCompareExchange128_nf;
  case clang::AArch64::BI_InterlockedCompareExchange128_rel:
    return MSVCIntrin::_InterlockedCompareExchange128_rel;
  case clang::AArch64::BI_InterlockedOr8_acq:
  case clang::AArch64::BI_InterlockedOr16_acq:
  case clang::AArch64::BI_InterlockedOr_acq:
  case clang::AArch64::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case clang::AArch64::BI_InterlockedOr8_rel:
  case clang::AArch64::BI_InterlockedOr16_rel:
  case clang::AArch64::BI_InterlockedOr_rel:
  case clang::AArch64::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case clang::AArch64::BI_InterlockedOr8_nf:
  case clang::AArch64::BI_InterlockedOr16_nf:
  case clang::AArch64::BI_InterlockedOr_nf:
  case clang::AArch64::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case clang::AArch64::BI_InterlockedXor8_acq:
  case clang::AArch64::BI_InterlockedXor16_acq:
  case clang::AArch64::BI_InterlockedXor_acq:
  case clang::AArch64::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case clang::AArch64::BI_InterlockedXor8_rel:
1476 case clang::AArch64::BI_InterlockedXor16_rel:
1477 case clang::AArch64::BI_InterlockedXor_rel:
1478 case clang::AArch64::BI_InterlockedXor64_rel:
1479 return MSVCIntrin::_InterlockedXor_rel;
1480 case clang::AArch64::BI_InterlockedXor8_nf:
1481 case clang::AArch64::BI_InterlockedXor16_nf:
1482 case clang::AArch64::BI_InterlockedXor_nf:
1483 case clang::AArch64::BI_InterlockedXor64_nf:
1484 return MSVCIntrin::_InterlockedXor_nf;
1485 case clang::AArch64::BI_InterlockedAnd8_acq:
1486 case clang::AArch64::BI_InterlockedAnd16_acq:
1487 case clang::AArch64::BI_InterlockedAnd_acq:
1488 case clang::AArch64::BI_InterlockedAnd64_acq:
1489 return MSVCIntrin::_InterlockedAnd_acq;
1490 case clang::AArch64::BI_InterlockedAnd8_rel:
1491 case clang::AArch64::BI_InterlockedAnd16_rel:
1492 case clang::AArch64::BI_InterlockedAnd_rel:
1493 case clang::AArch64::BI_InterlockedAnd64_rel:
1494 return MSVCIntrin::_InterlockedAnd_rel;
1495 case clang::AArch64::BI_InterlockedAnd8_nf:
1496 case clang::AArch64::BI_InterlockedAnd16_nf:
1497 case clang::AArch64::BI_InterlockedAnd_nf:
1498 case clang::AArch64::BI_InterlockedAnd64_nf:
1499 return MSVCIntrin::_InterlockedAnd_nf;
1500 case clang::AArch64::BI_InterlockedIncrement16_acq:
1501 case clang::AArch64::BI_InterlockedIncrement_acq:
1502 case clang::AArch64::BI_InterlockedIncrement64_acq:
1503 return MSVCIntrin::_InterlockedIncrement_acq;
1504 case clang::AArch64::BI_InterlockedIncrement16_rel:
1505 case clang::AArch64::BI_InterlockedIncrement_rel:
1506 case clang::AArch64::BI_InterlockedIncrement64_rel:
1507 return MSVCIntrin::_InterlockedIncrement_rel;
1508 case clang::AArch64::BI_InterlockedIncrement16_nf:
1509 case clang::AArch64::BI_InterlockedIncrement_nf:
1510 case clang::AArch64::BI_InterlockedIncrement64_nf:
1511 return MSVCIntrin::_InterlockedIncrement_nf;
1512 case clang::AArch64::BI_InterlockedDecrement16_acq:
1513 case clang::AArch64::BI_InterlockedDecrement_acq:
1514 case clang::AArch64::BI_InterlockedDecrement64_acq:
1515 return MSVCIntrin::_InterlockedDecrement_acq;
1516 case clang::AArch64::BI_InterlockedDecrement16_rel:
1517 case clang::AArch64::BI_InterlockedDecrement_rel:
1518 case clang::AArch64::BI_InterlockedDecrement64_rel:
1519 return MSVCIntrin::_InterlockedDecrement_rel;
1520 case clang::AArch64::BI_InterlockedDecrement16_nf:
1521 case clang::AArch64::BI_InterlockedDecrement_nf:
1522 case clang::AArch64::BI_InterlockedDecrement64_nf:
1523 return MSVCIntrin::_InterlockedDecrement_nf;
1525 llvm_unreachable("must return from switch");
1528 static std::optional<CodeGenFunction::MSVCIntrin>
1529 translateX86ToMsvcIntrin(unsigned BuiltinID) {
1530 using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1531 switch (BuiltinID) {
1532 default:
1533 return std::nullopt;
1534 case clang::X86::BI_BitScanForward:
1535 case clang::X86::BI_BitScanForward64:
1536 return MSVCIntrin::_BitScanForward;
1537 case clang::X86::BI_BitScanReverse:
1538 case clang::X86::BI_BitScanReverse64:
1539 return MSVCIntrin::_BitScanReverse;
1540 case clang::X86::BI_InterlockedAnd64:
1541 return MSVCIntrin::_InterlockedAnd;
1542 case clang::X86::BI_InterlockedCompareExchange128:
1543 return MSVCIntrin::_InterlockedCompareExchange128;
1544 case clang::X86::BI_InterlockedExchange64:
1545 return MSVCIntrin::_InterlockedExchange;
1546 case clang::X86::BI_InterlockedExchangeAdd64:
1547 return MSVCIntrin::_InterlockedExchangeAdd;
1548 case clang::X86::BI_InterlockedExchangeSub64:
1549 return MSVCIntrin::_InterlockedExchangeSub;
1550 case clang::X86::BI_InterlockedOr64:
1551 return MSVCIntrin::_InterlockedOr;
1552 case clang::X86::BI_InterlockedXor64:
1553 return MSVCIntrin::_InterlockedXor;
1554 case clang::X86::BI_InterlockedDecrement64:
1555 return MSVCIntrin::_InterlockedDecrement;
1556 case clang::X86::BI_InterlockedIncrement64:
1557 return MSVCIntrin::_InterlockedIncrement;
1559 llvm_unreachable("must return from switch");
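// Illustrative sketch, not a verbatim quote of the call sites (which live in
// the per-target emitters later in this file): these translation tables are
// expected to be consulted before target-specific handling, roughly as
//
//   if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
//     return EmitMSVCBuiltinExpr(*MsvcIntId, E);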
1562 // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1563 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1564 const CallExpr *E) {
1565 switch (BuiltinID) {
1566 case MSVCIntrin::_BitScanForward:
1567 case MSVCIntrin::_BitScanReverse: {
1568 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1569 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1571 llvm::Type *ArgType = ArgValue->getType();
1572 llvm::Type *IndexType = IndexAddress.getElementType();
1573 llvm::Type *ResultType = ConvertType(E->getType());
1575 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1576 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1577 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1579 BasicBlock *Begin = Builder.GetInsertBlock();
1580 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1581 Builder.SetInsertPoint(End);
1582 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1584 Builder.SetInsertPoint(Begin);
1585 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1586 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1587 Builder.CreateCondBr(IsZero, End, NotZero);
1588 Result->addIncoming(ResZero, Begin);
1590 Builder.SetInsertPoint(NotZero);
1592 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1593 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1594 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1595 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1596 Builder.CreateStore(ZeroCount, IndexAddress, false);
1597 } else {
1598 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1599 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1601 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1602 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1603 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1604 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1605 Builder.CreateStore(Index, IndexAddress, false);
1607 Builder.CreateBr(End);
1608 Result->addIncoming(ResOne, NotZero);
1610 Builder.SetInsertPoint(End);
1611 return Result;
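// Worked example for the control flow above (illustrative values): for a
// 32-bit argument, _BitScanReverse(&Index, 0x10) takes the "not zero" path,
// stores Index = 31 - ctlz(0x10) = 4, and the PHI yields 1; for an argument
// of 0 the branch goes straight to the end block, the index is left
// unwritten, and the PHI yields 0.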
1613 case MSVCIntrin::_InterlockedAnd:
1614 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1615 case MSVCIntrin::_InterlockedExchange:
1616 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1617 case MSVCIntrin::_InterlockedExchangeAdd:
1618 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1619 case MSVCIntrin::_InterlockedExchangeSub:
1620 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1621 case MSVCIntrin::_InterlockedOr:
1622 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1623 case MSVCIntrin::_InterlockedXor:
1624 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1625 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1626 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1627 AtomicOrdering::Acquire);
1628 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1629 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1630 AtomicOrdering::Release);
1631 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1632 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1633 AtomicOrdering::Monotonic);
1634 case MSVCIntrin::_InterlockedExchange_acq:
1635 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1636 AtomicOrdering::Acquire);
1637 case MSVCIntrin::_InterlockedExchange_rel:
1638 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1639 AtomicOrdering::Release);
1640 case MSVCIntrin::_InterlockedExchange_nf:
1641 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1642 AtomicOrdering::Monotonic);
1643 case MSVCIntrin::_InterlockedCompareExchange_acq:
1644 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1645 case MSVCIntrin::_InterlockedCompareExchange_rel:
1646 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1647 case MSVCIntrin::_InterlockedCompareExchange_nf:
1648 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1649 case MSVCIntrin::_InterlockedCompareExchange128:
1650 return EmitAtomicCmpXchg128ForMSIntrin(
1651 *this, E, AtomicOrdering::SequentiallyConsistent);
1652 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1653 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1654 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1655 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1656 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1657 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1658 case MSVCIntrin::_InterlockedOr_acq:
1659 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1660 AtomicOrdering::Acquire);
1661 case MSVCIntrin::_InterlockedOr_rel:
1662 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1663 AtomicOrdering::Release);
1664 case MSVCIntrin::_InterlockedOr_nf:
1665 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1666 AtomicOrdering::Monotonic);
1667 case MSVCIntrin::_InterlockedXor_acq:
1668 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1669 AtomicOrdering::Acquire);
1670 case MSVCIntrin::_InterlockedXor_rel:
1671 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1672 AtomicOrdering::Release);
1673 case MSVCIntrin::_InterlockedXor_nf:
1674 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1675 AtomicOrdering::Monotonic);
1676 case MSVCIntrin::_InterlockedAnd_acq:
1677 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1678 AtomicOrdering::Acquire);
1679 case MSVCIntrin::_InterlockedAnd_rel:
1680 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1681 AtomicOrdering::Release);
1682 case MSVCIntrin::_InterlockedAnd_nf:
1683 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1684 AtomicOrdering::Monotonic);
1685 case MSVCIntrin::_InterlockedIncrement_acq:
1686 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1687 case MSVCIntrin::_InterlockedIncrement_rel:
1688 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1689 case MSVCIntrin::_InterlockedIncrement_nf:
1690 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1691 case MSVCIntrin::_InterlockedDecrement_acq:
1692 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1693 case MSVCIntrin::_InterlockedDecrement_rel:
1694 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1695 case MSVCIntrin::_InterlockedDecrement_nf:
1696 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1698 case MSVCIntrin::_InterlockedDecrement:
1699 return EmitAtomicDecrementValue(*this, E);
1700 case MSVCIntrin::_InterlockedIncrement:
1701 return EmitAtomicIncrementValue(*this, E);
1703 case MSVCIntrin::__fastfail: {
1704 // Request immediate process termination from the kernel. The instruction
1705 // sequences to do this are documented on MSDN:
1706 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1707 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1708 StringRef Asm, Constraints;
1709 switch (ISA) {
1710 default:
1711 ErrorUnsupported(E, "__fastfail call for this architecture");
1712 break;
1713 case llvm::Triple::x86:
1714 case llvm::Triple::x86_64:
1715 Asm = "int $$0x29";
1716 Constraints = "{cx}";
1717 break;
1718 case llvm::Triple::thumb:
1719 Asm = "udf #251";
1720 Constraints = "{r0}";
1721 break;
1722 case llvm::Triple::aarch64:
1723 Asm = "brk #0xF003";
1724 Constraints = "{w0}";
1726 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
1727 llvm::InlineAsm *IA =
1728 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1729 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
1730 getLLVMContext(), llvm::AttributeList::FunctionIndex,
1731 llvm::Attribute::NoReturn);
1732 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
1733 CI->setAttributes(NoReturnAttr);
1734 return CI;
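// For example (x86-64, illustrative): __fastfail(7) becomes a no-return
// inline-asm call "int $$0x29" with the fail code pinned to ECX by the
// "{cx}" constraint, matching the MSDN-documented sequence cited above.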
1737 llvm_unreachable("Incorrect MSVC intrinsic!");
1740 namespace {
1741 // ARC cleanup for __builtin_os_log_format
1742 struct CallObjCArcUse final : EHScopeStack::Cleanup {
1743 CallObjCArcUse(llvm::Value *object) : object(object) {}
1744 llvm::Value *object;
1746 void Emit(CodeGenFunction &CGF, Flags flags) override {
1747 CGF.EmitARCIntrinsicUse(object);
1752 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
1753 BuiltinCheckKind Kind) {
1754 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
1755 && "Unsupported builtin check kind");
1757 Value *ArgValue = EmitScalarExpr(E);
1758 if (!SanOpts.has(SanitizerKind::Builtin))
1759 return ArgValue;
1761 SanitizerScope SanScope(this);
1762 Value *Cond = Builder.CreateICmpNE(
1763 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
1764 EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
1765 SanitizerHandler::InvalidBuiltin,
1766 {EmitCheckSourceLocation(E->getExprLoc()),
1767 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
1768 std::nullopt);
1769 return ArgValue;
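// Usage sketch (assumes -fsanitize=builtin): a call such as __builtin_clz(0)
// or __builtin_ctz(0) makes Cond false here, so EmitCheck reports it through
// the invalid-builtin handler before the ctlz/cttz emitted by the caller,
// which may treat a zero input as undefined, is ever reached.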
1772 static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
1773 // X < 0 ? -X : X
1774 // TODO: Use phi-node (for better SimplifyCFGPass)
1775 Value *NegOp = CGF.Builder.CreateNeg(ArgValue, "neg", false, HasNSW);
1776 Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
1777 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1778 return CGF.Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
1781 static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
1782 bool SanitizeOverflow) {
1783 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
1785 // Try to eliminate overflow check.
1786 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
1787 if (!VCI->isMinSignedValue())
1788 return EmitAbs(CGF, ArgValue, true);
1791 CodeGenFunction::SanitizerScope SanScope(&CGF);
1793 Constant *Zero = Constant::getNullValue(ArgValue->getType());
1794 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
1795 Intrinsic::ssub_with_overflow, Zero, ArgValue);
1796 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
1797 Value *NotOverflow = CGF.Builder.CreateNot(
1798 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
1800 // TODO: support -ftrapv-handler.
1801 if (SanitizeOverflow) {
1802 CGF.EmitCheck({{NotOverflow, SanitizerKind::SignedIntegerOverflow}},
1803 SanitizerHandler::NegateOverflow,
1804 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
1805 CGF.EmitCheckTypeDescriptor(E->getType())},
1806 {ArgValue});
1807 } else
1808 CGF.EmitTrapCheck(NotOverflow, SanitizerHandler::SubOverflow);
1810 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
1811 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
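// Worked example (illustrative, 32-bit int): abs(INT_MIN) computes
// 0 - INT_MIN via ssub.with.overflow, which reports overflow, so this path
// either fires the signed-overflow sanitizer check or traps; any other input
// falls through to the final select and yields |x| as usual.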
1814 /// Get the argument type for arguments to os_log_helper.
1815 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
1816 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
1817 return C.getCanonicalType(UnsignedTy);
1820 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
1821 const analyze_os_log::OSLogBufferLayout &Layout,
1822 CharUnits BufferAlignment) {
1823 ASTContext &Ctx = getContext();
1825 llvm::SmallString<64> Name;
1827 raw_svector_ostream OS(Name);
1828 OS << "__os_log_helper";
1829 OS << "_" << BufferAlignment.getQuantity();
1830 OS << "_" << int(Layout.getSummaryByte());
1831 OS << "_" << int(Layout.getNumArgsByte());
1832 for (const auto &Item : Layout.Items)
1833 OS << "_" << int(Item.getSizeByte()) << "_"
1834 << int(Item.getDescriptorByte());
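// Example of a resulting name (hypothetical layout, for illustration only):
// a 16-byte-aligned buffer with one 4-byte argument might yield
// "__os_log_helper_16_0_1_4_0". Because the name encodes the alignment, the
// summary and argument-count bytes, and each item's size/descriptor bytes,
// identical layouts share a single helper via the lookup just below.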
1837 if (llvm::Function *F = CGM.getModule().getFunction(Name))
1838 return F;
1840 llvm::SmallVector<QualType, 4> ArgTys;
1841 FunctionArgList Args;
1842 Args.push_back(ImplicitParamDecl::Create(
1843 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
1844 ImplicitParamDecl::Other));
1845 ArgTys.emplace_back(Ctx.VoidPtrTy);
1847 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
1848 char Size = Layout.Items[I].getSizeByte();
1849 if (!Size)
1850 continue;
1852 QualType ArgTy = getOSLogArgType(Ctx, Size);
1853 Args.push_back(ImplicitParamDecl::Create(
1854 Ctx, nullptr, SourceLocation(),
1855 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
1856 ImplicitParamDecl::Other));
1857 ArgTys.emplace_back(ArgTy);
1860 QualType ReturnTy = Ctx.VoidTy;
1862 // The helper function has linkonce_odr linkage to enable the linker to merge
1863 // identical functions. To ensure the merging always happens, 'noinline' is
1864 // attached to the function when compiling with -Oz.
1865 const CGFunctionInfo &FI =
1866 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
1867 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
1868 llvm::Function *Fn = llvm::Function::Create(
1869 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
1870 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
1871 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
1872 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
1873 Fn->setDoesNotThrow();
1875 // Attach 'noinline' at -Oz.
1876 if (CGM.getCodeGenOpts().OptimizeSize == 2)
1877 Fn->addFnAttr(llvm::Attribute::NoInline);
1879 auto NL = ApplyDebugLocation::CreateEmpty(*this);
1880 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
1882 // Create a scope with an artificial location for the body of this function.
1883 auto AL = ApplyDebugLocation::CreateArtificial(*this);
1885 CharUnits Offset;
1886 Address BufAddr =
1887 Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
1888 BufferAlignment);
1889 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
1890 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
1891 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
1892 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
1894 unsigned I = 1;
1895 for (const auto &Item : Layout.Items) {
1896 Builder.CreateStore(
1897 Builder.getInt8(Item.getDescriptorByte()),
1898 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
1899 Builder.CreateStore(
1900 Builder.getInt8(Item.getSizeByte()),
1901 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
1903 CharUnits Size = Item.size();
1904 if (!Size.getQuantity())
1905 continue;
1907 Address Arg = GetAddrOfLocalVar(Args[I]);
1908 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
1909 Addr = Addr.withElementType(Arg.getElementType());
1910 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
1911 Offset += Size;
1912 ++I;
1915 FinishFunction();
1917 return Fn;
1920 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
1921 assert(E.getNumArgs() >= 2 &&
1922 "__builtin_os_log_format takes at least 2 arguments");
1923 ASTContext &Ctx = getContext();
1924 analyze_os_log::OSLogBufferLayout Layout;
1925 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
1926 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
1927 llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
1929 // Ignore argument 1, the format string. It is not currently used.
1930 CallArgList Args;
1931 Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
1933 for (const auto &Item : Layout.Items) {
1934 int Size = Item.getSizeByte();
1935 if (!Size)
1936 continue;
1938 llvm::Value *ArgVal;
1940 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
1941 uint64_t Val = 0;
1942 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
1943 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
1944 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
1945 } else if (const Expr *TheExpr = Item.getExpr()) {
1946 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
1948 // If a temporary object that requires destruction after the full
1949 // expression is passed, push a lifetime-extended cleanup to extend its
1950 // lifetime to the end of the enclosing block scope.
1951 auto LifetimeExtendObject = [&](const Expr *E) {
1952 E = E->IgnoreParenCasts();
1953 // Extend lifetimes of objects returned by function calls and message
1954 // sends.
1956 // FIXME: We should do this in other cases in which temporaries are
1957 // created including arguments of non-ARC types (e.g., C++
1958 // temporaries).
1959 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
1960 return true;
1961 return false;
1964 if (TheExpr->getType()->isObjCRetainableType() &&
1965 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
1966 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
1967 "Only scalar can be a ObjC retainable type");
1968 if (!isa<Constant>(ArgVal)) {
1969 CleanupKind Cleanup = getARCCleanupKind();
1970 QualType Ty = TheExpr->getType();
1971 Address Alloca = Address::invalid();
1972 Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
1973 ArgVal = EmitARCRetain(Ty, ArgVal);
1974 Builder.CreateStore(ArgVal, Addr);
1975 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
1976 CodeGenFunction::destroyARCStrongPrecise,
1977 Cleanup & EHCleanup);
1979 // Push a clang.arc.use call to ensure the ARC optimizer knows that the
1980 // argument has to be kept alive.
1981 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
1982 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
1985 } else {
1986 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
1989 unsigned ArgValSize =
1990 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
1991 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
1992 ArgValSize);
1993 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
1994 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
1995 // If ArgVal has type x86_fp80, zero-extend ArgVal.
1996 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
1997 Args.add(RValue::get(ArgVal), ArgTy);
2000 const CGFunctionInfo &FI =
2001 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2002 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2003 Layout, BufAddr.getAlignment());
2004 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2005 return RValue::get(BufAddr.getPointer());
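// Buffer sketch (illustrative): for a format with a single 4-byte scalar
// argument, the generated helper writes [summary][numArgs=1][descriptor]
// [size=4][4 bytes of data] into the caller-provided buffer, and this
// function simply returns that buffer pointer.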
2008 static bool isSpecialUnsignedMultiplySignedResult(
2009 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2010 WidthAndSignedness ResultInfo) {
2011 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2012 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2013 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2016 static RValue EmitCheckedUnsignedMultiplySignedResult(
2017 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2018 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2019 const clang::Expr *ResultArg, QualType ResultQTy,
2020 WidthAndSignedness ResultInfo) {
2021 assert(isSpecialUnsignedMultiplySignedResult(
2022 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2023 "Cannot specialize this multiply");
2025 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2026 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2028 llvm::Value *HasOverflow;
2029 llvm::Value *Result = EmitOverflowIntrinsic(
2030 CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2032 // The intrinsic call will detect overflow when the value is > UINT_MAX;
2033 // however, since the original builtin has a signed result, we need to report
2034 // an overflow when the result is greater than INT_MAX.
2035 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2036 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2038 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2039 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2041 bool isVolatile =
2042 ResultArg->getType()->getPointeeType().isVolatileQualified();
2043 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2044 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2045 isVolatile);
2046 return RValue::get(HasOverflow);
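// Worked example (illustrative, 32-bit operands, signed int result):
// __builtin_mul_overflow(2000000000u, 2u, &res) does not overflow the
// unsigned multiply (4000000000 <= UINT_MAX), but the extra IntMaxOverflow
// compare fires because 4000000000 > INT_MAX, so overflow is still reported.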
2049 /// Determine if a binop is a checked mixed-sign multiply we can specialize.
2050 static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2051 WidthAndSignedness Op1Info,
2052 WidthAndSignedness Op2Info,
2053 WidthAndSignedness ResultInfo) {
2054 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2055 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2056 Op1Info.Signed != Op2Info.Signed;
2059 /// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2060 /// the generic checked-binop irgen.
2061 static RValue
2062 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2063 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2064 WidthAndSignedness Op2Info,
2065 const clang::Expr *ResultArg, QualType ResultQTy,
2066 WidthAndSignedness ResultInfo) {
2067 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2068 Op2Info, ResultInfo) &&
2069 "Not a mixed-sign multipliction we can specialize");
2071 // Emit the signed and unsigned operands.
2072 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2073 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2074 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2075 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2076 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2077 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2079 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2080 if (SignedOpWidth < UnsignedOpWidth)
2081 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2082 if (UnsignedOpWidth < SignedOpWidth)
2083 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2085 llvm::Type *OpTy = Signed->getType();
2086 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2087 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2088 llvm::Type *ResTy = ResultPtr.getElementType();
2089 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2091 // Take the absolute value of the signed operand.
2092 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2093 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2094 llvm::Value *AbsSigned =
2095 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2097 // Perform a checked unsigned multiplication.
2098 llvm::Value *UnsignedOverflow;
2099 llvm::Value *UnsignedResult =
2100 EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
2101 Unsigned, UnsignedOverflow);
2103 llvm::Value *Overflow, *Result;
2104 if (ResultInfo.Signed) {
2105 // Signed overflow occurs if the result is greater than INT_MAX or less
2106 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2107 auto IntMax =
2108 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2109 llvm::Value *MaxResult =
2110 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2111 CGF.Builder.CreateZExt(IsNegative, OpTy));
2112 llvm::Value *SignedOverflow =
2113 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2114 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2116 // Prepare the signed result (possibly by negating it).
2117 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2118 llvm::Value *SignedResult =
2119 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2120 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2121 } else {
2122 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2123 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2124 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2125 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2126 if (ResultInfo.Width < OpWidth) {
2127 auto IntMax =
2128 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2129 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2130 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2131 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2134 // Negate the product if it would be negative in infinite precision.
2135 Result = CGF.Builder.CreateSelect(
2136 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2138 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2140 assert(Overflow && Result && "Missing overflow or result");
2142 bool isVolatile =
2143 ResultArg->getType()->getPointeeType().isVolatileQualified();
2144 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2145 isVolatile);
2146 return RValue::get(Overflow);
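// Worked example (illustrative, 32-bit, signed int result):
// __builtin_mul_overflow(-3, 5u, &res) takes |-3| = 3, multiplies 3 * 5 = 15
// with umul.with.overflow, then re-negates because IsNegative is set, storing
// -15 with no overflow; the checks above only fire when |product| exceeds
// INT_MAX + IsNegative (or the unsigned/truncation bounds for other results).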
2149 static bool
2150 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2151 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2152 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2153 Ty = Ctx.getBaseElementType(Arr);
2155 const auto *Record = Ty->getAsCXXRecordDecl();
2156 if (!Record)
2157 return false;
2159 // We've already checked this type, or are in the process of checking it.
2160 if (!Seen.insert(Record).second)
2161 return false;
2163 assert(Record->hasDefinition() &&
2164 "Incomplete types should already be diagnosed");
2166 if (Record->isDynamicClass())
2167 return true;
2169 for (FieldDecl *F : Record->fields()) {
2170 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2171 return true;
2173 return false;
2176 /// Determine if the specified type requires laundering by checking if it is a
2177 /// dynamic class type or contains a subobject which is a dynamic class type.
2178 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2179 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2180 return false;
2181 llvm::SmallPtrSet<const Decl *, 16> Seen;
2182 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
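// Example (illustrative): under -fstrict-vtable-pointers, a type such as
//   struct Base { virtual void f(); };
//   struct Holder { Base b; };
// requires laundering because Holder contains a dynamic-class subobject,
// whereas a plain aggregate of scalars does not, so __builtin_launder on the
// latter can be lowered to just its argument.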
2185 RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2186 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2187 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2189 // The builtin's shift arg may have a different type than the source arg and
2190 // result, but the LLVM intrinsic uses the same type for all values.
2191 llvm::Type *Ty = Src->getType();
2192 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2194 // Rotate is a special case of the LLVM funnel shift - the first two args are the same.
2195 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2196 Function *F = CGM.getIntrinsic(IID, Ty);
2197 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
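// Example (illustrative): __builtin_rotateleft32(x, n) reaches this helper
// and becomes llvm.fshl.i32(x, x, n), while __builtin_rotateright32(x, n)
// becomes llvm.fshr.i32(x, x, n); passing the source as both funnel-shift
// inputs is what turns a funnel shift into a rotate.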
2200 // Map math builtins for long-double to f128 version.
2201 static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2202 switch (BuiltinID) {
2203 #define MUTATE_LDBL(func) \
2204 case Builtin::BI__builtin_##func##l: \
2205 return Builtin::BI__builtin_##func##f128;
2206 MUTATE_LDBL(sqrt)
2207 MUTATE_LDBL(cbrt)
2208 MUTATE_LDBL(fabs)
2209 MUTATE_LDBL(log)
2210 MUTATE_LDBL(log2)
2211 MUTATE_LDBL(log10)
2212 MUTATE_LDBL(log1p)
2213 MUTATE_LDBL(logb)
2214 MUTATE_LDBL(exp)
2215 MUTATE_LDBL(exp2)
2216 MUTATE_LDBL(expm1)
2217 MUTATE_LDBL(fdim)
2218 MUTATE_LDBL(hypot)
2219 MUTATE_LDBL(ilogb)
2220 MUTATE_LDBL(pow)
2221 MUTATE_LDBL(fmin)
2222 MUTATE_LDBL(fmax)
2223 MUTATE_LDBL(ceil)
2224 MUTATE_LDBL(trunc)
2225 MUTATE_LDBL(rint)
2226 MUTATE_LDBL(nearbyint)
2227 MUTATE_LDBL(round)
2228 MUTATE_LDBL(floor)
2229 MUTATE_LDBL(lround)
2230 MUTATE_LDBL(llround)
2231 MUTATE_LDBL(lrint)
2232 MUTATE_LDBL(llrint)
2233 MUTATE_LDBL(fmod)
2234 MUTATE_LDBL(modf)
2235 MUTATE_LDBL(nan)
2236 MUTATE_LDBL(nans)
2237 MUTATE_LDBL(inf)
2238 MUTATE_LDBL(fma)
2239 MUTATE_LDBL(sin)
2240 MUTATE_LDBL(cos)
2241 MUTATE_LDBL(tan)
2242 MUTATE_LDBL(sinh)
2243 MUTATE_LDBL(cosh)
2244 MUTATE_LDBL(tanh)
2245 MUTATE_LDBL(asin)
2246 MUTATE_LDBL(acos)
2247 MUTATE_LDBL(atan)
2248 MUTATE_LDBL(asinh)
2249 MUTATE_LDBL(acosh)
2250 MUTATE_LDBL(atanh)
2251 MUTATE_LDBL(atan2)
2252 MUTATE_LDBL(erf)
2253 MUTATE_LDBL(erfc)
2254 MUTATE_LDBL(ldexp)
2255 MUTATE_LDBL(frexp)
2256 MUTATE_LDBL(huge_val)
2257 MUTATE_LDBL(copysign)
2258 MUTATE_LDBL(nextafter)
2259 MUTATE_LDBL(nexttoward)
2260 MUTATE_LDBL(remainder)
2261 MUTATE_LDBL(remquo)
2262 MUTATE_LDBL(scalbln)
2263 MUTATE_LDBL(scalbn)
2264 MUTATE_LDBL(tgamma)
2265 MUTATE_LDBL(lgamma)
2266 #undef MUTATE_LDBL
2267 default:
2268 return BuiltinID;
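// Example (illustrative): when long double uses the IEEE 128-bit format (see
// the PPC64 check in EmitBuiltinExpr below), Builtin::BI__builtin_sqrtl is
// rewritten to Builtin::BI__builtin_sqrtf128 so the f128 lowering is used.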
2272 static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2273 Value *V) {
2274 if (CGF.Builder.getIsFPConstrained() &&
2275 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2276 if (Value *Result =
2277 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2278 return Result;
2280 return nullptr;
2283 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2284 const CallExpr *E,
2285 ReturnValueSlot ReturnValue) {
2286 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2287 // See if we can constant fold this builtin. If so, don't emit it at all.
2288 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2289 Expr::EvalResult Result;
2290 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2291 !Result.hasSideEffects()) {
2292 if (Result.Val.isInt())
2293 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2294 Result.Val.getInt()));
2295 if (Result.Val.isFloat())
2296 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2297 Result.Val.getFloat()));
2300 // If the current long-double semantics are IEEE 128-bit, replace math builtins
2301 // of long-double with their f128 equivalents.
2302 // TODO: This mutation should also be applied to targets other than PPC,
2303 // once the backend supports IEEE 128-bit style libcalls.
2304 if (getTarget().getTriple().isPPC64() &&
2305 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2306 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2308 // If the builtin has been declared explicitly with an assembler label,
2309 // disable the specialized emitting below. Ideally we should communicate the
2310 // rename in IR, or at least avoid generating the intrinsic calls that are
2311 // likely to get lowered to the renamed library functions.
2312 const unsigned BuiltinIDIfNoAsmLabel =
2313 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2315 // There are LLVM math intrinsics/instructions corresponding to math library
2316 // functions, except that the LLVM op will never set errno while the math library
2317 // might. Also, math builtins have the same semantics as their math library
2318 // twins. Thus, we can transform math library and builtin calls to their
2319 // LLVM counterparts if the call is marked 'const' (known to never set errno).
2320 // If FP exceptions are enabled, the experimental (constrained) versions of the
2321 // intrinsics model those exceptions.
2322 bool ConstWithoutErrnoAndExceptions =
2323 getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
2324 bool ConstWithoutExceptions =
2325 getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID);
2326 if (FD->hasAttr<ConstAttr>() ||
2327 ((ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) &&
2328 (!ConstWithoutErrnoAndExceptions || (!getLangOpts().MathErrno)))) {
2329 switch (BuiltinIDIfNoAsmLabel) {
2330 case Builtin::BIceil:
2331 case Builtin::BIceilf:
2332 case Builtin::BIceill:
2333 case Builtin::BI__builtin_ceil:
2334 case Builtin::BI__builtin_ceilf:
2335 case Builtin::BI__builtin_ceilf16:
2336 case Builtin::BI__builtin_ceill:
2337 case Builtin::BI__builtin_ceilf128:
2338 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2339 Intrinsic::ceil,
2340 Intrinsic::experimental_constrained_ceil));
2342 case Builtin::BIcopysign:
2343 case Builtin::BIcopysignf:
2344 case Builtin::BIcopysignl:
2345 case Builtin::BI__builtin_copysign:
2346 case Builtin::BI__builtin_copysignf:
2347 case Builtin::BI__builtin_copysignf16:
2348 case Builtin::BI__builtin_copysignl:
2349 case Builtin::BI__builtin_copysignf128:
2350 return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
2352 case Builtin::BIcos:
2353 case Builtin::BIcosf:
2354 case Builtin::BIcosl:
2355 case Builtin::BI__builtin_cos:
2356 case Builtin::BI__builtin_cosf:
2357 case Builtin::BI__builtin_cosf16:
2358 case Builtin::BI__builtin_cosl:
2359 case Builtin::BI__builtin_cosf128:
2360 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2361 Intrinsic::cos,
2362 Intrinsic::experimental_constrained_cos));
2364 case Builtin::BIexp:
2365 case Builtin::BIexpf:
2366 case Builtin::BIexpl:
2367 case Builtin::BI__builtin_exp:
2368 case Builtin::BI__builtin_expf:
2369 case Builtin::BI__builtin_expf16:
2370 case Builtin::BI__builtin_expl:
2371 case Builtin::BI__builtin_expf128:
2372 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2373 Intrinsic::exp,
2374 Intrinsic::experimental_constrained_exp));
2376 case Builtin::BIexp2:
2377 case Builtin::BIexp2f:
2378 case Builtin::BIexp2l:
2379 case Builtin::BI__builtin_exp2:
2380 case Builtin::BI__builtin_exp2f:
2381 case Builtin::BI__builtin_exp2f16:
2382 case Builtin::BI__builtin_exp2l:
2383 case Builtin::BI__builtin_exp2f128:
2384 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2385 Intrinsic::exp2,
2386 Intrinsic::experimental_constrained_exp2));
2388 case Builtin::BIfabs:
2389 case Builtin::BIfabsf:
2390 case Builtin::BIfabsl:
2391 case Builtin::BI__builtin_fabs:
2392 case Builtin::BI__builtin_fabsf:
2393 case Builtin::BI__builtin_fabsf16:
2394 case Builtin::BI__builtin_fabsl:
2395 case Builtin::BI__builtin_fabsf128:
2396 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
2398 case Builtin::BIfloor:
2399 case Builtin::BIfloorf:
2400 case Builtin::BIfloorl:
2401 case Builtin::BI__builtin_floor:
2402 case Builtin::BI__builtin_floorf:
2403 case Builtin::BI__builtin_floorf16:
2404 case Builtin::BI__builtin_floorl:
2405 case Builtin::BI__builtin_floorf128:
2406 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2407 Intrinsic::floor,
2408 Intrinsic::experimental_constrained_floor));
2410 case Builtin::BIfma:
2411 case Builtin::BIfmaf:
2412 case Builtin::BIfmal:
2413 case Builtin::BI__builtin_fma:
2414 case Builtin::BI__builtin_fmaf:
2415 case Builtin::BI__builtin_fmaf16:
2416 case Builtin::BI__builtin_fmal:
2417 case Builtin::BI__builtin_fmaf128:
2418 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2419 Intrinsic::fma,
2420 Intrinsic::experimental_constrained_fma));
2422 case Builtin::BIfmax:
2423 case Builtin::BIfmaxf:
2424 case Builtin::BIfmaxl:
2425 case Builtin::BI__builtin_fmax:
2426 case Builtin::BI__builtin_fmaxf:
2427 case Builtin::BI__builtin_fmaxf16:
2428 case Builtin::BI__builtin_fmaxl:
2429 case Builtin::BI__builtin_fmaxf128:
2430 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2431 Intrinsic::maxnum,
2432 Intrinsic::experimental_constrained_maxnum));
2434 case Builtin::BIfmin:
2435 case Builtin::BIfminf:
2436 case Builtin::BIfminl:
2437 case Builtin::BI__builtin_fmin:
2438 case Builtin::BI__builtin_fminf:
2439 case Builtin::BI__builtin_fminf16:
2440 case Builtin::BI__builtin_fminl:
2441 case Builtin::BI__builtin_fminf128:
2442 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2443 Intrinsic::minnum,
2444 Intrinsic::experimental_constrained_minnum));
2446 // fmod() is a special case: it maps to the frem instruction rather than an
2447 // LLVM intrinsic.
2448 case Builtin::BIfmod:
2449 case Builtin::BIfmodf:
2450 case Builtin::BIfmodl:
2451 case Builtin::BI__builtin_fmod:
2452 case Builtin::BI__builtin_fmodf:
2453 case Builtin::BI__builtin_fmodf16:
2454 case Builtin::BI__builtin_fmodl:
2455 case Builtin::BI__builtin_fmodf128: {
2456 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2457 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2458 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2459 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2462 case Builtin::BIlog:
2463 case Builtin::BIlogf:
2464 case Builtin::BIlogl:
2465 case Builtin::BI__builtin_log:
2466 case Builtin::BI__builtin_logf:
2467 case Builtin::BI__builtin_logf16:
2468 case Builtin::BI__builtin_logl:
2469 case Builtin::BI__builtin_logf128:
2470 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2471 Intrinsic::log,
2472 Intrinsic::experimental_constrained_log));
2474 case Builtin::BIlog10:
2475 case Builtin::BIlog10f:
2476 case Builtin::BIlog10l:
2477 case Builtin::BI__builtin_log10:
2478 case Builtin::BI__builtin_log10f:
2479 case Builtin::BI__builtin_log10f16:
2480 case Builtin::BI__builtin_log10l:
2481 case Builtin::BI__builtin_log10f128:
2482 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2483 Intrinsic::log10,
2484 Intrinsic::experimental_constrained_log10));
2486 case Builtin::BIlog2:
2487 case Builtin::BIlog2f:
2488 case Builtin::BIlog2l:
2489 case Builtin::BI__builtin_log2:
2490 case Builtin::BI__builtin_log2f:
2491 case Builtin::BI__builtin_log2f16:
2492 case Builtin::BI__builtin_log2l:
2493 case Builtin::BI__builtin_log2f128:
2494 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2495 Intrinsic::log2,
2496 Intrinsic::experimental_constrained_log2));
2498 case Builtin::BInearbyint:
2499 case Builtin::BInearbyintf:
2500 case Builtin::BInearbyintl:
2501 case Builtin::BI__builtin_nearbyint:
2502 case Builtin::BI__builtin_nearbyintf:
2503 case Builtin::BI__builtin_nearbyintl:
2504 case Builtin::BI__builtin_nearbyintf128:
2505 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2506 Intrinsic::nearbyint,
2507 Intrinsic::experimental_constrained_nearbyint));
2509 case Builtin::BIpow:
2510 case Builtin::BIpowf:
2511 case Builtin::BIpowl:
2512 case Builtin::BI__builtin_pow:
2513 case Builtin::BI__builtin_powf:
2514 case Builtin::BI__builtin_powf16:
2515 case Builtin::BI__builtin_powl:
2516 case Builtin::BI__builtin_powf128:
2517 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2518 Intrinsic::pow,
2519 Intrinsic::experimental_constrained_pow));
2521 case Builtin::BIrint:
2522 case Builtin::BIrintf:
2523 case Builtin::BIrintl:
2524 case Builtin::BI__builtin_rint:
2525 case Builtin::BI__builtin_rintf:
2526 case Builtin::BI__builtin_rintf16:
2527 case Builtin::BI__builtin_rintl:
2528 case Builtin::BI__builtin_rintf128:
2529 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2530 Intrinsic::rint,
2531 Intrinsic::experimental_constrained_rint));
2533 case Builtin::BIround:
2534 case Builtin::BIroundf:
2535 case Builtin::BIroundl:
2536 case Builtin::BI__builtin_round:
2537 case Builtin::BI__builtin_roundf:
2538 case Builtin::BI__builtin_roundf16:
2539 case Builtin::BI__builtin_roundl:
2540 case Builtin::BI__builtin_roundf128:
2541 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2542 Intrinsic::round,
2543 Intrinsic::experimental_constrained_round));
2545 case Builtin::BIroundeven:
2546 case Builtin::BIroundevenf:
2547 case Builtin::BIroundevenl:
2548 case Builtin::BI__builtin_roundeven:
2549 case Builtin::BI__builtin_roundevenf:
2550 case Builtin::BI__builtin_roundevenf16:
2551 case Builtin::BI__builtin_roundevenl:
2552 case Builtin::BI__builtin_roundevenf128:
2553 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2554 Intrinsic::roundeven,
2555 Intrinsic::experimental_constrained_roundeven));
2557 case Builtin::BIsin:
2558 case Builtin::BIsinf:
2559 case Builtin::BIsinl:
2560 case Builtin::BI__builtin_sin:
2561 case Builtin::BI__builtin_sinf:
2562 case Builtin::BI__builtin_sinf16:
2563 case Builtin::BI__builtin_sinl:
2564 case Builtin::BI__builtin_sinf128:
2565 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2566 Intrinsic::sin,
2567 Intrinsic::experimental_constrained_sin));
2569 case Builtin::BIsqrt:
2570 case Builtin::BIsqrtf:
2571 case Builtin::BIsqrtl:
2572 case Builtin::BI__builtin_sqrt:
2573 case Builtin::BI__builtin_sqrtf:
2574 case Builtin::BI__builtin_sqrtf16:
2575 case Builtin::BI__builtin_sqrtl:
2576 case Builtin::BI__builtin_sqrtf128:
2577 case Builtin::BI__builtin_elementwise_sqrt: {
2578 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
2579 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
2580 SetSqrtFPAccuracy(Call);
2581 return RValue::get(Call);
2583 case Builtin::BItrunc:
2584 case Builtin::BItruncf:
2585 case Builtin::BItruncl:
2586 case Builtin::BI__builtin_trunc:
2587 case Builtin::BI__builtin_truncf:
2588 case Builtin::BI__builtin_truncf16:
2589 case Builtin::BI__builtin_truncl:
2590 case Builtin::BI__builtin_truncf128:
2591 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2592 Intrinsic::trunc,
2593 Intrinsic::experimental_constrained_trunc));
2595 case Builtin::BIlround:
2596 case Builtin::BIlroundf:
2597 case Builtin::BIlroundl:
2598 case Builtin::BI__builtin_lround:
2599 case Builtin::BI__builtin_lroundf:
2600 case Builtin::BI__builtin_lroundl:
2601 case Builtin::BI__builtin_lroundf128:
2602 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2603 *this, E, Intrinsic::lround,
2604 Intrinsic::experimental_constrained_lround));
2606 case Builtin::BIllround:
2607 case Builtin::BIllroundf:
2608 case Builtin::BIllroundl:
2609 case Builtin::BI__builtin_llround:
2610 case Builtin::BI__builtin_llroundf:
2611 case Builtin::BI__builtin_llroundl:
2612 case Builtin::BI__builtin_llroundf128:
2613 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2614 *this, E, Intrinsic::llround,
2615 Intrinsic::experimental_constrained_llround));
2617 case Builtin::BIlrint:
2618 case Builtin::BIlrintf:
2619 case Builtin::BIlrintl:
2620 case Builtin::BI__builtin_lrint:
2621 case Builtin::BI__builtin_lrintf:
2622 case Builtin::BI__builtin_lrintl:
2623 case Builtin::BI__builtin_lrintf128:
2624 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2625 *this, E, Intrinsic::lrint,
2626 Intrinsic::experimental_constrained_lrint));
2628 case Builtin::BIllrint:
2629 case Builtin::BIllrintf:
2630 case Builtin::BIllrintl:
2631 case Builtin::BI__builtin_llrint:
2632 case Builtin::BI__builtin_llrintf:
2633 case Builtin::BI__builtin_llrintl:
2634 case Builtin::BI__builtin_llrintf128:
2635 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2636 *this, E, Intrinsic::llrint,
2637 Intrinsic::experimental_constrained_llrint));
2638 case Builtin::BI__builtin_ldexp:
2639 case Builtin::BI__builtin_ldexpf:
2640 case Builtin::BI__builtin_ldexpl:
2641 case Builtin::BI__builtin_ldexpf16:
2642 case Builtin::BI__builtin_ldexpf128: {
2643 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
2644 *this, E, Intrinsic::ldexp,
2645 Intrinsic::experimental_constrained_ldexp));
2647 default:
2648 break;
2652 switch (BuiltinIDIfNoAsmLabel) {
2653 default: break;
2654 case Builtin::BI__builtin___CFStringMakeConstantString:
2655 case Builtin::BI__builtin___NSStringMakeConstantString:
2656 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
2657 case Builtin::BI__builtin_stdarg_start:
2658 case Builtin::BI__builtin_va_start:
2659 case Builtin::BI__va_start:
2660 case Builtin::BI__builtin_va_end:
2661 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2662 ? EmitScalarExpr(E->getArg(0))
2663 : EmitVAListRef(E->getArg(0)).getPointer(),
2664 BuiltinID != Builtin::BI__builtin_va_end);
2665 return RValue::get(nullptr);
2666 case Builtin::BI__builtin_va_copy: {
2667 Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2668 Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2669 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), {DstPtr, SrcPtr});
2670 return RValue::get(nullptr);
2672 case Builtin::BIabs:
2673 case Builtin::BIlabs:
2674 case Builtin::BIllabs:
2675 case Builtin::BI__builtin_abs:
2676 case Builtin::BI__builtin_labs:
2677 case Builtin::BI__builtin_llabs: {
2678 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
2680 Value *Result;
2681 switch (getLangOpts().getSignedOverflowBehavior()) {
2682 case LangOptions::SOB_Defined:
2683 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
2684 break;
2685 case LangOptions::SOB_Undefined:
2686 if (!SanitizeOverflow) {
2687 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
2688 break;
2690 [[fallthrough]];
2691 case LangOptions::SOB_Trapping:
2692 // TODO: Somehow handle the corner case when the address of abs is taken.
2693 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
2694 break;
2696 return RValue::get(Result);
2698 case Builtin::BI__builtin_complex: {
2699 Value *Real = EmitScalarExpr(E->getArg(0));
2700 Value *Imag = EmitScalarExpr(E->getArg(1));
2701 return RValue::getComplex({Real, Imag});
2703 case Builtin::BI__builtin_conj:
2704 case Builtin::BI__builtin_conjf:
2705 case Builtin::BI__builtin_conjl:
2706 case Builtin::BIconj:
2707 case Builtin::BIconjf:
2708 case Builtin::BIconjl: {
2709 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2710 Value *Real = ComplexVal.first;
2711 Value *Imag = ComplexVal.second;
2712 Imag = Builder.CreateFNeg(Imag, "neg");
2713 return RValue::getComplex(std::make_pair(Real, Imag));
2715 case Builtin::BI__builtin_creal:
2716 case Builtin::BI__builtin_crealf:
2717 case Builtin::BI__builtin_creall:
2718 case Builtin::BIcreal:
2719 case Builtin::BIcrealf:
2720 case Builtin::BIcreall: {
2721 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2722 return RValue::get(ComplexVal.first);
2725 case Builtin::BI__builtin_preserve_access_index: {
2726 // Only enable the preserved-access-index region when debug info
2727 // is available, as debug info is needed to preserve the user-level
2728 // access pattern.
2729 if (!getDebugInfo()) {
2730 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2731 return RValue::get(EmitScalarExpr(E->getArg(0)));
2734 // Nested builtin_preserve_access_index() not supported
2735 if (IsInPreservedAIRegion) {
2736 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2737 return RValue::get(EmitScalarExpr(E->getArg(0)));
2740 IsInPreservedAIRegion = true;
2741 Value *Res = EmitScalarExpr(E->getArg(0));
2742 IsInPreservedAIRegion = false;
2743 return RValue::get(Res);
2746 case Builtin::BI__builtin_cimag:
2747 case Builtin::BI__builtin_cimagf:
2748 case Builtin::BI__builtin_cimagl:
2749 case Builtin::BIcimag:
2750 case Builtin::BIcimagf:
2751 case Builtin::BIcimagl: {
2752 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2753 return RValue::get(ComplexVal.second);
2756 case Builtin::BI__builtin_clrsb:
2757 case Builtin::BI__builtin_clrsbl:
2758 case Builtin::BI__builtin_clrsbll: {
2759 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
2760 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2762 llvm::Type *ArgType = ArgValue->getType();
2763 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2765 llvm::Type *ResultType = ConvertType(E->getType());
2766 Value *Zero = llvm::Constant::getNullValue(ArgType);
2767 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2768 Value *Inverse = Builder.CreateNot(ArgValue, "not");
2769 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2770 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2771 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2772 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2773 "cast");
2774 return RValue::get(Result);
2776 case Builtin::BI__builtin_ctzs:
2777 case Builtin::BI__builtin_ctz:
2778 case Builtin::BI__builtin_ctzl:
2779 case Builtin::BI__builtin_ctzll: {
2780 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2782 llvm::Type *ArgType = ArgValue->getType();
2783 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2785 llvm::Type *ResultType = ConvertType(E->getType());
2786 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2787 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2788 if (Result->getType() != ResultType)
2789 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2790 "cast");
2791 return RValue::get(Result);
2793 case Builtin::BI__builtin_clzs:
2794 case Builtin::BI__builtin_clz:
2795 case Builtin::BI__builtin_clzl:
2796 case Builtin::BI__builtin_clzll: {
2797 Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2799 llvm::Type *ArgType = ArgValue->getType();
2800 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2802 llvm::Type *ResultType = ConvertType(E->getType());
2803 Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2804 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2805 if (Result->getType() != ResultType)
2806 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2807 "cast");
2808 return RValue::get(Result);
2810 case Builtin::BI__builtin_ffs:
2811 case Builtin::BI__builtin_ffsl:
2812 case Builtin::BI__builtin_ffsll: {
2813 // ffs(x) -> x ? cttz(x) + 1 : 0
2814 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2816 llvm::Type *ArgType = ArgValue->getType();
2817 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2819 llvm::Type *ResultType = ConvertType(E->getType());
2820 Value *Tmp =
2821 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2822 llvm::ConstantInt::get(ArgType, 1));
2823 Value *Zero = llvm::Constant::getNullValue(ArgType);
2824 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2825 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2826 if (Result->getType() != ResultType)
2827 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2828 "cast");
2829 return RValue::get(Result);
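// [Editorial sketch, not in the original source] Equivalent C for a 32-bit int,
// assuming a hypothetical ctz32() whose result is unused when x == 0:
//   int ffs32(int x) { return x == 0 ? 0 : ctz32(x) + 1; }
// e.g. ffs32(8) == 4 (lowest set bit is bit 3, reported 1-based), ffs32(0) == 0.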
2831 case Builtin::BI__builtin_parity:
2832 case Builtin::BI__builtin_parityl:
2833 case Builtin::BI__builtin_parityll: {
2834 // parity(x) -> ctpop(x) & 1
2835 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2837 llvm::Type *ArgType = ArgValue->getType();
2838 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2840 llvm::Type *ResultType = ConvertType(E->getType());
2841 Value *Tmp = Builder.CreateCall(F, ArgValue);
2842 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2843 if (Result->getType() != ResultType)
2844 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2845 "cast");
2846 return RValue::get(Result);
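// [Editorial sketch, not in the original source] Equivalent C:
//   int parity32(unsigned x) { return __builtin_popcount(x) & 1; }
// e.g. parity32(0b1011) == 1, since three bits are set.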
2848 case Builtin::BI__lzcnt16:
2849 case Builtin::BI__lzcnt:
2850 case Builtin::BI__lzcnt64: {
2851 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2853 llvm::Type *ArgType = ArgValue->getType();
2854 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2856 llvm::Type *ResultType = ConvertType(E->getType());
2857 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2858 if (Result->getType() != ResultType)
2859 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2860 "cast");
2861 return RValue::get(Result);
2863 case Builtin::BI__popcnt16:
2864 case Builtin::BI__popcnt:
2865 case Builtin::BI__popcnt64:
2866 case Builtin::BI__builtin_popcount:
2867 case Builtin::BI__builtin_popcountl:
2868 case Builtin::BI__builtin_popcountll: {
2869 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2871 llvm::Type *ArgType = ArgValue->getType();
2872 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2874 llvm::Type *ResultType = ConvertType(E->getType());
2875 Value *Result = Builder.CreateCall(F, ArgValue);
2876 if (Result->getType() != ResultType)
2877 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2878 "cast");
2879 return RValue::get(Result);
2881 case Builtin::BI__builtin_unpredictable: {
2882 // Always return the argument of __builtin_unpredictable. LLVM does not
2883 // handle this builtin. Metadata for this builtin should be added directly
2884 // to instructions such as branches or switches that use it.
2885 return RValue::get(EmitScalarExpr(E->getArg(0)));
2887 case Builtin::BI__builtin_expect: {
2888 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2889 llvm::Type *ArgType = ArgValue->getType();
2891 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2892 // Don't generate llvm.expect on -O0 as the backend won't use it for
2893 // anything.
2894 // Note, we still IRGen ExpectedValue because it could have side-effects.
2895 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2896 return RValue::get(ArgValue);
2898 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
2899 Value *Result =
2900 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2901 return RValue::get(Result);
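// [Editorial note, not in the original source] Typical source-level use, where
// slow_path() is a placeholder; at -O0 the code above just returns the first
// argument, at higher optimization levels it becomes a call to llvm.expect:
//   if (__builtin_expect(x == 0, 0)) slow_path();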
2903 case Builtin::BI__builtin_expect_with_probability: {
2904 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2905 llvm::Type *ArgType = ArgValue->getType();
2907 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2908 llvm::APFloat Probability(0.0);
2909 const Expr *ProbArg = E->getArg(2);
2910 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2911 assert(EvalSucceed && "probability should be able to evaluate as float");
2912 (void)EvalSucceed;
2913 bool LoseInfo = false;
2914 Probability.convert(llvm::APFloat::IEEEdouble(),
2915 llvm::RoundingMode::Dynamic, &LoseInfo);
2916 llvm::Type *Ty = ConvertType(ProbArg->getType());
2917 Constant *Confidence = ConstantFP::get(Ty, Probability);
2918 // Don't generate llvm.expect.with.probability on -O0 as the backend
2919 // won't use it for anything.
2920 // Note, we still IRGen ExpectedValue because it could have side-effects.
2921 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2922 return RValue::get(ArgValue);
2924 Function *FnExpect =
2925 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2926 Value *Result = Builder.CreateCall(
2927 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2928 return RValue::get(Result);
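// [Editorial note, not in the original source] Source-level example: hint that
// the condition is true with roughly 90% probability:
//   if (__builtin_expect_with_probability(x > 0, 1, 0.90)) { /* likely path */ }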
2930 case Builtin::BI__builtin_assume_aligned: {
2931 const Expr *Ptr = E->getArg(0);
2932 Value *PtrValue = EmitScalarExpr(Ptr);
2933 Value *OffsetValue =
2934 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2936 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2937 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
2938 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2939 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2940 llvm::Value::MaximumAlignment);
2942 emitAlignmentAssumption(PtrValue, Ptr,
2943 /*The expr loc is sufficient.*/ SourceLocation(),
2944 AlignmentCI, OffsetValue);
2945 return RValue::get(PtrValue);
2947 case Builtin::BI__assume:
2948 case Builtin::BI__builtin_assume: {
2949 if (E->getArg(0)->HasSideEffects(getContext()))
2950 return RValue::get(nullptr);
2952 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2953 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2954 Builder.CreateCall(FnAssume, ArgValue);
2955 return RValue::get(nullptr);
2957 case Builtin::BI__builtin_assume_separate_storage: {
2958 const Expr *Arg0 = E->getArg(0);
2959 const Expr *Arg1 = E->getArg(1);
2961 Value *Value0 = EmitScalarExpr(Arg0);
2962 Value *Value1 = EmitScalarExpr(Arg1);
2964 Value *Values[] = {Value0, Value1};
2965 OperandBundleDefT<Value *> OBD("separate_storage", Values);
2966 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
2967 return RValue::get(nullptr);
2969 case Builtin::BI__arithmetic_fence: {
2970 // Create the builtin call if FastMath is selected, and the target
2971 // supports the builtin, otherwise just return the argument.
2972 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2973 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
2974 bool isArithmeticFenceEnabled =
2975 FMF.allowReassoc() &&
2976 getContext().getTargetInfo().checkArithmeticFenceSupported();
2977 QualType ArgType = E->getArg(0)->getType();
2978 if (ArgType->isComplexType()) {
2979 if (isArithmeticFenceEnabled) {
2980 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
2981 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2982 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
2983 ConvertType(ElementType));
2984 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
2985 ConvertType(ElementType));
2986 return RValue::getComplex(std::make_pair(Real, Imag));
2988 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2989 Value *Real = ComplexVal.first;
2990 Value *Imag = ComplexVal.second;
2991 return RValue::getComplex(std::make_pair(Real, Imag));
2993 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2994 if (isArithmeticFenceEnabled)
2995 return RValue::get(
2996 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
2997 return RValue::get(ArgValue);
2999 case Builtin::BI__builtin_bswap16:
3000 case Builtin::BI__builtin_bswap32:
3001 case Builtin::BI__builtin_bswap64:
3002 case Builtin::BI_byteswap_ushort:
3003 case Builtin::BI_byteswap_ulong:
3004 case Builtin::BI_byteswap_uint64: {
3005 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
3007 case Builtin::BI__builtin_bitreverse8:
3008 case Builtin::BI__builtin_bitreverse16:
3009 case Builtin::BI__builtin_bitreverse32:
3010 case Builtin::BI__builtin_bitreverse64: {
3011 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
3013 case Builtin::BI__builtin_rotateleft8:
3014 case Builtin::BI__builtin_rotateleft16:
3015 case Builtin::BI__builtin_rotateleft32:
3016 case Builtin::BI__builtin_rotateleft64:
3017 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3018 case Builtin::BI_rotl16:
3019 case Builtin::BI_rotl:
3020 case Builtin::BI_lrotl:
3021 case Builtin::BI_rotl64:
3022 return emitRotate(E, false);
3024 case Builtin::BI__builtin_rotateright8:
3025 case Builtin::BI__builtin_rotateright16:
3026 case Builtin::BI__builtin_rotateright32:
3027 case Builtin::BI__builtin_rotateright64:
3028 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3029 case Builtin::BI_rotr16:
3030 case Builtin::BI_rotr:
3031 case Builtin::BI_lrotr:
3032 case Builtin::BI_rotr64:
3033 return emitRotate(E, true);
3035 case Builtin::BI__builtin_constant_p: {
3036 llvm::Type *ResultType = ConvertType(E->getType());
3038 const Expr *Arg = E->getArg(0);
3039 QualType ArgType = Arg->getType();
3040 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3041 // and likely a mistake.
3042 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3043 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3044 // Per the GCC documentation, only numeric constants are recognized after
3045 // inlining.
3046 return RValue::get(ConstantInt::get(ResultType, 0));
3048 if (Arg->HasSideEffects(getContext()))
3049 // The argument is unevaluated, so be conservative if it might have
3050 // side-effects.
3051 return RValue::get(ConstantInt::get(ResultType, 0));
3053 Value *ArgValue = EmitScalarExpr(Arg);
3054 if (ArgType->isObjCObjectPointerType()) {
3055 // Convert Objective-C objects to id because we cannot distinguish between
3056 // LLVM types for Obj-C classes as they are opaque.
3057 ArgType = CGM.getContext().getObjCIdType();
3058 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3060 Function *F =
3061 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3062 Value *Result = Builder.CreateCall(F, ArgValue);
3063 if (Result->getType() != ResultType)
3064 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3065 return RValue::get(Result);
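// [Editorial note, not in the original source] Source-level example; the
// llvm.is.constant intrinsic lets the optimizer answer this after inlining, so
// the ternary below folds to its first arm when n is a literal at the call
// site (round_up() is a placeholder):
//   size_t bytes = __builtin_constant_p(n) ? n : round_up(n);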
3067 case Builtin::BI__builtin_dynamic_object_size:
3068 case Builtin::BI__builtin_object_size: {
3069 unsigned Type =
3070 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3071 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3073 // We pass this builtin onto the optimizer so that it can figure out the
3074 // object size in more complex cases.
3075 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3076 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3077 /*EmittedE=*/nullptr, IsDynamic));
3079 case Builtin::BI__builtin_prefetch: {
3080 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3081 // FIXME: Technically these constants should be of type 'int', yes?
3082 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3083 llvm::ConstantInt::get(Int32Ty, 0);
3084 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3085 llvm::ConstantInt::get(Int32Ty, 3);
3086 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3087 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3088 Builder.CreateCall(F, {Address, RW, Locality, Data});
3089 return RValue::get(nullptr);
3091 case Builtin::BI__builtin_readcyclecounter: {
3092 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3093 return RValue::get(Builder.CreateCall(F));
3095 case Builtin::BI__builtin___clear_cache: {
3096 Value *Begin = EmitScalarExpr(E->getArg(0));
3097 Value *End = EmitScalarExpr(E->getArg(1));
3098 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3099 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3101 case Builtin::BI__builtin_trap:
3102 EmitTrapCall(Intrinsic::trap);
3103 return RValue::get(nullptr);
3104 case Builtin::BI__debugbreak:
3105 EmitTrapCall(Intrinsic::debugtrap);
3106 return RValue::get(nullptr);
3107 case Builtin::BI__builtin_unreachable: {
3108 EmitUnreachable(E->getExprLoc());
3110 // We do need to preserve an insertion point.
3111 EmitBlock(createBasicBlock("unreachable.cont"));
3113 return RValue::get(nullptr);
3116 case Builtin::BI__builtin_powi:
3117 case Builtin::BI__builtin_powif:
3118 case Builtin::BI__builtin_powil: {
3119 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3120 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3122 if (Builder.getIsFPConstrained()) {
3123 // FIXME: llvm.powi has 2 mangling types,
3124 // llvm.experimental.constrained.powi has one.
3125 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3126 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3127 Src0->getType());
3128 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3131 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3132 { Src0->getType(), Src1->getType() });
3133 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3135 case Builtin::BI__builtin_frexp:
3136 case Builtin::BI__builtin_frexpf:
3137 case Builtin::BI__builtin_frexpl:
3138 case Builtin::BI__builtin_frexpf128:
3139 case Builtin::BI__builtin_frexpf16:
3140 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3141 case Builtin::BI__builtin_isgreater:
3142 case Builtin::BI__builtin_isgreaterequal:
3143 case Builtin::BI__builtin_isless:
3144 case Builtin::BI__builtin_islessequal:
3145 case Builtin::BI__builtin_islessgreater:
3146 case Builtin::BI__builtin_isunordered: {
3147 // Ordered comparisons: we know the arguments to these are matching scalar
3148 // floating point values.
3149 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3150 Value *LHS = EmitScalarExpr(E->getArg(0));
3151 Value *RHS = EmitScalarExpr(E->getArg(1));
3153 switch (BuiltinID) {
3154 default: llvm_unreachable("Unknown ordered comparison");
3155 case Builtin::BI__builtin_isgreater:
3156 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3157 break;
3158 case Builtin::BI__builtin_isgreaterequal:
3159 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3160 break;
3161 case Builtin::BI__builtin_isless:
3162 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3163 break;
3164 case Builtin::BI__builtin_islessequal:
3165 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3166 break;
3167 case Builtin::BI__builtin_islessgreater:
3168 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3169 break;
3170 case Builtin::BI__builtin_isunordered:
3171 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3172 break;
3174 // ZExt bool to int type.
3175 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
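// [Editorial note, not in the original source] The mapping applied above, for
// example:
//   __builtin_isgreater(a, b)   -> fcmp ogt a, b
//   __builtin_islessequal(a, b) -> fcmp ole a, b
//   __builtin_isunordered(a, b) -> fcmp uno a, b
// with the i1 result zero-extended to the integer result type.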
3178 case Builtin::BI__builtin_isnan: {
3179 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3180 Value *V = EmitScalarExpr(E->getArg(0));
3181 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3182 return RValue::get(Result);
3183 return RValue::get(
3184 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3185 ConvertType(E->getType())));
3188 case Builtin::BI__builtin_isinf: {
3189 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3190 Value *V = EmitScalarExpr(E->getArg(0));
3191 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3192 return RValue::get(Result);
3193 return RValue::get(
3194 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3195 ConvertType(E->getType())));
3198 case Builtin::BIfinite:
3199 case Builtin::BI__finite:
3200 case Builtin::BIfinitef:
3201 case Builtin::BI__finitef:
3202 case Builtin::BIfinitel:
3203 case Builtin::BI__finitel:
3204 case Builtin::BI__builtin_isfinite: {
3205 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3206 Value *V = EmitScalarExpr(E->getArg(0));
3207 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3208 return RValue::get(Result);
3209 return RValue::get(
3210 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3211 ConvertType(E->getType())));
3214 case Builtin::BI__builtin_isnormal: {
3215 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3216 Value *V = EmitScalarExpr(E->getArg(0));
3217 return RValue::get(
3218 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3219 ConvertType(E->getType())));
3222 case Builtin::BI__builtin_isfpclass: {
3223 Expr::EvalResult Result;
3224 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3225 break;
3226 uint64_t Test = Result.Val.getInt().getLimitedValue();
3227 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3228 Value *V = EmitScalarExpr(E->getArg(0));
3229 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3230 ConvertType(E->getType())));
3233 case Builtin::BI__builtin_nondeterministic_value: {
3234 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3236 Value *Result = PoisonValue::get(Ty);
3237 Result = Builder.CreateFreeze(Result);
3239 return RValue::get(Result);
3242 case Builtin::BI__builtin_elementwise_abs: {
3243 Value *Result;
3244 QualType QT = E->getArg(0)->getType();
3246 if (auto *VecTy = QT->getAs<VectorType>())
3247 QT = VecTy->getElementType();
3248 if (QT->isIntegerType())
3249 Result = Builder.CreateBinaryIntrinsic(
3250 llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
3251 Builder.getFalse(), nullptr, "elt.abs");
3252 else
3253 Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
3255 return RValue::get(Result);
3258 case Builtin::BI__builtin_elementwise_ceil:
3259 return RValue::get(
3260 emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
3261 case Builtin::BI__builtin_elementwise_exp:
3262 return RValue::get(
3263 emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
3264 case Builtin::BI__builtin_elementwise_exp2:
3265 return RValue::get(
3266 emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
3267 case Builtin::BI__builtin_elementwise_log:
3268 return RValue::get(
3269 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
3270 case Builtin::BI__builtin_elementwise_log2:
3271 return RValue::get(
3272 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
3273 case Builtin::BI__builtin_elementwise_log10:
3274 return RValue::get(
3275 emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
3276 case Builtin::BI__builtin_elementwise_pow: {
3277 return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
3279 case Builtin::BI__builtin_elementwise_bitreverse:
3280 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::bitreverse,
3281 "elt.bitreverse"));
3282 case Builtin::BI__builtin_elementwise_cos:
3283 return RValue::get(
3284 emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
3285 case Builtin::BI__builtin_elementwise_floor:
3286 return RValue::get(
3287 emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
3288 case Builtin::BI__builtin_elementwise_roundeven:
3289 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
3290 "elt.roundeven"));
3291 case Builtin::BI__builtin_elementwise_round:
3292 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
3293 "elt.round"));
3294 case Builtin::BI__builtin_elementwise_rint:
3295 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
3296 "elt.rint"));
3297 case Builtin::BI__builtin_elementwise_nearbyint:
3298 return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
3299 "elt.nearbyint"));
3300 case Builtin::BI__builtin_elementwise_sin:
3301 return RValue::get(
3302 emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));
3304 case Builtin::BI__builtin_elementwise_trunc:
3305 return RValue::get(
3306 emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
3307 case Builtin::BI__builtin_elementwise_canonicalize:
3308 return RValue::get(
3309 emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
3310 case Builtin::BI__builtin_elementwise_copysign:
3311 return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
3312 case Builtin::BI__builtin_elementwise_fma:
3313 return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
3314 case Builtin::BI__builtin_elementwise_add_sat:
3315 case Builtin::BI__builtin_elementwise_sub_sat: {
3316 Value *Op0 = EmitScalarExpr(E->getArg(0));
3317 Value *Op1 = EmitScalarExpr(E->getArg(1));
3318 Value *Result;
3319 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3320 QualType Ty = E->getArg(0)->getType();
3321 if (auto *VecTy = Ty->getAs<VectorType>())
3322 Ty = VecTy->getElementType();
3323 bool IsSigned = Ty->isSignedIntegerType();
3324 unsigned Opc;
3325 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
3326 Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
3327 else
3328 Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
3329 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
3330 return RValue::get(Result);
3333 case Builtin::BI__builtin_elementwise_max: {
3334 Value *Op0 = EmitScalarExpr(E->getArg(0));
3335 Value *Op1 = EmitScalarExpr(E->getArg(1));
3336 Value *Result;
3337 if (Op0->getType()->isIntOrIntVectorTy()) {
3338 QualType Ty = E->getArg(0)->getType();
3339 if (auto *VecTy = Ty->getAs<VectorType>())
3340 Ty = VecTy->getElementType();
3341 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3342 ? llvm::Intrinsic::smax
3343 : llvm::Intrinsic::umax,
3344 Op0, Op1, nullptr, "elt.max");
3345 } else
3346 Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
3347 return RValue::get(Result);
3349 case Builtin::BI__builtin_elementwise_min: {
3350 Value *Op0 = EmitScalarExpr(E->getArg(0));
3351 Value *Op1 = EmitScalarExpr(E->getArg(1));
3352 Value *Result;
3353 if (Op0->getType()->isIntOrIntVectorTy()) {
3354 QualType Ty = E->getArg(0)->getType();
3355 if (auto *VecTy = Ty->getAs<VectorType>())
3356 Ty = VecTy->getElementType();
3357 Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3358 ? llvm::Intrinsic::smin
3359 : llvm::Intrinsic::umin,
3360 Op0, Op1, nullptr, "elt.min");
3361 } else
3362 Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
3363 return RValue::get(Result);
3366 case Builtin::BI__builtin_reduce_max: {
3367 auto GetIntrinsicID = [](QualType QT) {
3368 if (auto *VecTy = QT->getAs<VectorType>())
3369 QT = VecTy->getElementType();
3370 if (QT->isSignedIntegerType())
3371 return llvm::Intrinsic::vector_reduce_smax;
3372 if (QT->isUnsignedIntegerType())
3373 return llvm::Intrinsic::vector_reduce_umax;
3374 assert(QT->isFloatingType() && "must have a float here");
3375 return llvm::Intrinsic::vector_reduce_fmax;
3377 return RValue::get(emitUnaryBuiltin(
3378 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.max"));
3381 case Builtin::BI__builtin_reduce_min: {
3382 auto GetIntrinsicID = [](QualType QT) {
3383 if (auto *VecTy = QT->getAs<VectorType>())
3384 QT = VecTy->getElementType();
3385 if (QT->isSignedIntegerType())
3386 return llvm::Intrinsic::vector_reduce_smin;
3387 if (QT->isUnsignedIntegerType())
3388 return llvm::Intrinsic::vector_reduce_umin;
3389 assert(QT->isFloatingType() && "must have a float here");
3390 return llvm::Intrinsic::vector_reduce_fmin;
3393 return RValue::get(emitUnaryBuiltin(
3394 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
3397 case Builtin::BI__builtin_reduce_add:
3398 return RValue::get(emitUnaryBuiltin(
3399 *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
3400 case Builtin::BI__builtin_reduce_mul:
3401 return RValue::get(emitUnaryBuiltin(
3402 *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
3403 case Builtin::BI__builtin_reduce_xor:
3404 return RValue::get(emitUnaryBuiltin(
3405 *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
3406 case Builtin::BI__builtin_reduce_or:
3407 return RValue::get(emitUnaryBuiltin(
3408 *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
3409 case Builtin::BI__builtin_reduce_and:
3410 return RValue::get(emitUnaryBuiltin(
3411 *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
3413 case Builtin::BI__builtin_matrix_transpose: {
3414 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
3415 Value *MatValue = EmitScalarExpr(E->getArg(0));
3416 MatrixBuilder MB(Builder);
3417 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
3418 MatrixTy->getNumColumns());
3419 return RValue::get(Result);
3422 case Builtin::BI__builtin_matrix_column_major_load: {
3423 MatrixBuilder MB(Builder);
3424 // Emit everything that isn't dependent on the first parameter type
3425 Value *Stride = EmitScalarExpr(E->getArg(3));
3426 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3427 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
3428 assert(PtrTy && "arg0 must be of pointer type");
3429 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3431 Address Src = EmitPointerWithAlignment(E->getArg(0));
3432 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3433 E->getArg(0)->getExprLoc(), FD, 0);
3434 Value *Result = MB.CreateColumnMajorLoad(
3435 Src.getElementType(), Src.getPointer(),
3436 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
3437 ResultTy->getNumRows(), ResultTy->getNumColumns(),
3438 "matrix");
3439 return RValue::get(Result);
3442 case Builtin::BI__builtin_matrix_column_major_store: {
3443 MatrixBuilder MB(Builder);
3444 Value *Matrix = EmitScalarExpr(E->getArg(0));
3445 Address Dst = EmitPointerWithAlignment(E->getArg(1));
3446 Value *Stride = EmitScalarExpr(E->getArg(2));
3448 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3449 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
3450 assert(PtrTy && "arg1 must be of pointer type");
3451 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3453 EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3454 E->getArg(1)->getExprLoc(), FD, 0);
3455 Value *Result = MB.CreateColumnMajorStore(
3456 Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3457 Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3458 return RValue::get(Result);
3461 case Builtin::BI__builtin_isinf_sign: {
3462 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3463 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3464 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3465 Value *Arg = EmitScalarExpr(E->getArg(0));
3466 Value *AbsArg = EmitFAbs(*this, Arg);
3467 Value *IsInf = Builder.CreateFCmpOEQ(
3468 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3469 Value *IsNeg = EmitSignBit(*this, Arg);
3471 llvm::Type *IntTy = ConvertType(E->getType());
3472 Value *Zero = Constant::getNullValue(IntTy);
3473 Value *One = ConstantInt::get(IntTy, 1);
3474 Value *NegativeOne = ConstantInt::get(IntTy, -1);
3475 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3476 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3477 return RValue::get(Result);
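// [Editorial sketch, not in the original source] Equivalent C for the select
// chain built above (using <math.h> fabs/signbit/INFINITY):
//   int isinf_sign(double x) {
//     return fabs(x) == INFINITY ? (signbit(x) ? -1 : 1) : 0;
//   }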
3480 case Builtin::BI__builtin_flt_rounds: {
3481 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
3483 llvm::Type *ResultType = ConvertType(E->getType());
3484 Value *Result = Builder.CreateCall(F);
3485 if (Result->getType() != ResultType)
3486 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3487 "cast");
3488 return RValue::get(Result);
3491 case Builtin::BI__builtin_set_flt_rounds: {
3492 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
3494 Value *V = EmitScalarExpr(E->getArg(0));
3495 Builder.CreateCall(F, V);
3496 return RValue::get(nullptr);
3499 case Builtin::BI__builtin_fpclassify: {
3500 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3501 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3502 Value *V = EmitScalarExpr(E->getArg(5));
3503 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
3505 // Create Result
3506 BasicBlock *Begin = Builder.GetInsertBlock();
3507 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3508 Builder.SetInsertPoint(End);
3509 PHINode *Result =
3510 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3511 "fpclassify_result");
3513 // if (V==0) return FP_ZERO
3514 Builder.SetInsertPoint(Begin);
3515 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3516 "iszero");
3517 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3518 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3519 Builder.CreateCondBr(IsZero, End, NotZero);
3520 Result->addIncoming(ZeroLiteral, Begin);
3522 // if (V != V) return FP_NAN
3523 Builder.SetInsertPoint(NotZero);
3524 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3525 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3526 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3527 Builder.CreateCondBr(IsNan, End, NotNan);
3528 Result->addIncoming(NanLiteral, NotZero);
3530 // if (fabs(V) == infinity) return FP_INFINITY
3531 Builder.SetInsertPoint(NotNan);
3532 Value *VAbs = EmitFAbs(*this, V);
3533 Value *IsInf =
3534 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3535 "isinf");
3536 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3537 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3538 Builder.CreateCondBr(IsInf, End, NotInf);
3539 Result->addIncoming(InfLiteral, NotNan);
3541 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3542 Builder.SetInsertPoint(NotInf);
3543 APFloat Smallest = APFloat::getSmallestNormalized(
3544 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3545 Value *IsNormal =
3546 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3547 "isnormal");
3548 Value *NormalResult =
3549 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3550 EmitScalarExpr(E->getArg(3)));
3551 Builder.CreateBr(End);
3552 Result->addIncoming(NormalResult, NotInf);
3554 // return Result
3555 Builder.SetInsertPoint(End);
3556 return RValue::get(Result);
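// [Editorial sketch, not in the original source] The basic-block cascade above
// is the IR form of this C logic for a double argument, where args 0..4 are
// the caller-supplied FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL and FP_ZERO
// values and arg 5 is the value being classified:
//   if (v == 0)              return FP_ZERO;
//   if (v != v)              return FP_NAN;
//   if (fabs(v) == INFINITY) return FP_INFINITE;
//   return fabs(v) >= DBL_MIN ? FP_NORMAL : FP_SUBNORMAL;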
3559 // An alloca will always return a pointer to the alloca (stack) address
3560 // space. This address space need not be the same as the AST / Language
3561 // default (e.g. in C / C++ auto vars are in the generic address space). At
3562 // the AST level this is handled within CreateTempAlloca et al., but for the
3563 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
3564 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
3565 case Builtin::BIalloca:
3566 case Builtin::BI_alloca:
3567 case Builtin::BI__builtin_alloca_uninitialized:
3568 case Builtin::BI__builtin_alloca: {
3569 Value *Size = EmitScalarExpr(E->getArg(0));
3570 const TargetInfo &TI = getContext().getTargetInfo();
3571 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3572 const Align SuitableAlignmentInBytes =
3573 CGM.getContext()
3574 .toCharUnitsFromBits(TI.getSuitableAlign())
3575 .getAsAlign();
3576 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3577 AI->setAlignment(SuitableAlignmentInBytes);
3578 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
3579 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3580 LangAS AAS = getASTAllocaAddressSpace();
3581 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
3582 if (AAS != EAS) {
3583 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
3584 return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
3585 EAS, Ty));
3587 return RValue::get(AI);
3590 case Builtin::BI__builtin_alloca_with_align_uninitialized:
3591 case Builtin::BI__builtin_alloca_with_align: {
3592 Value *Size = EmitScalarExpr(E->getArg(0));
3593 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3594 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3595 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3596 const Align AlignmentInBytes =
3597 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3598 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3599 AI->setAlignment(AlignmentInBytes);
3600 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
3601 initializeAlloca(*this, AI, Size, AlignmentInBytes);
3602 LangAS AAS = getASTAllocaAddressSpace();
3603 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
3604 if (AAS != EAS) {
3605 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
3606 return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
3607 EAS, Ty));
3609 return RValue::get(AI);
3612 case Builtin::BIbzero:
3613 case Builtin::BI__builtin_bzero: {
3614 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3615 Value *SizeVal = EmitScalarExpr(E->getArg(1));
3616 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3617 E->getArg(0)->getExprLoc(), FD, 0);
3618 Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3619 return RValue::get(nullptr);
3621 case Builtin::BImemcpy:
3622 case Builtin::BI__builtin_memcpy:
3623 case Builtin::BImempcpy:
3624 case Builtin::BI__builtin_mempcpy: {
3625 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3626 Address Src = EmitPointerWithAlignment(E->getArg(1));
3627 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3628 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3629 E->getArg(0)->getExprLoc(), FD, 0);
3630 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3631 E->getArg(1)->getExprLoc(), FD, 1);
3632 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3633 if (BuiltinID == Builtin::BImempcpy ||
3634 BuiltinID == Builtin::BI__builtin_mempcpy)
3635 return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
3636 Dest.getPointer(), SizeVal));
3637 else
3638 return RValue::get(Dest.getPointer());
3641 case Builtin::BI__builtin_memcpy_inline: {
3642 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3643 Address Src = EmitPointerWithAlignment(E->getArg(1));
3644 uint64_t Size =
3645 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3646 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3647 E->getArg(0)->getExprLoc(), FD, 0);
3648 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3649 E->getArg(1)->getExprLoc(), FD, 1);
3650 Builder.CreateMemCpyInline(Dest, Src, Size);
3651 return RValue::get(nullptr);
3654 case Builtin::BI__builtin_char_memchr:
3655 BuiltinID = Builtin::BI__builtin_memchr;
3656 break;
3658 case Builtin::BI__builtin___memcpy_chk: {
3659 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3660 Expr::EvalResult SizeResult, DstSizeResult;
3661 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3662 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3663 break;
3664 llvm::APSInt Size = SizeResult.Val.getInt();
3665 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3666 if (Size.ugt(DstSize))
3667 break;
3668 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3669 Address Src = EmitPointerWithAlignment(E->getArg(1));
3670 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3671 Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3672 return RValue::get(Dest.getPointer());
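// [Editorial note, not in the original source] Example of the fold (dst/src are
// placeholders): when both size arguments are compile-time constants and the
// copy size does not exceed the destination size, the check is dropped:
//   __builtin___memcpy_chk(dst, src, 16, 32);  // emitted as memcpy(dst, src, 16)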
3675 case Builtin::BI__builtin_objc_memmove_collectable: {
3676 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3677 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3678 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3679 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3680 DestAddr, SrcAddr, SizeVal);
3681 return RValue::get(DestAddr.getPointer());
3684 case Builtin::BI__builtin___memmove_chk: {
3685 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3686 Expr::EvalResult SizeResult, DstSizeResult;
3687 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3688 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3689 break;
3690 llvm::APSInt Size = SizeResult.Val.getInt();
3691 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3692 if (Size.ugt(DstSize))
3693 break;
3694 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3695 Address Src = EmitPointerWithAlignment(E->getArg(1));
3696 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3697 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3698 return RValue::get(Dest.getPointer());
3701 case Builtin::BImemmove:
3702 case Builtin::BI__builtin_memmove: {
3703 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3704 Address Src = EmitPointerWithAlignment(E->getArg(1));
3705 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3706 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3707 E->getArg(0)->getExprLoc(), FD, 0);
3708 EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3709 E->getArg(1)->getExprLoc(), FD, 1);
3710 Builder.CreateMemMove(Dest, Src, SizeVal, false);
3711 return RValue::get(Dest.getPointer());
3713 case Builtin::BImemset:
3714 case Builtin::BI__builtin_memset: {
3715 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3716 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3717 Builder.getInt8Ty());
3718 Value *SizeVal = EmitScalarExpr(E->getArg(2));
3719 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3720 E->getArg(0)->getExprLoc(), FD, 0);
3721 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3722 return RValue::get(Dest.getPointer());
3724 case Builtin::BI__builtin_memset_inline: {
3725 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3726 Value *ByteVal =
3727 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
3728 uint64_t Size =
3729 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3730 EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3731 E->getArg(0)->getExprLoc(), FD, 0);
3732 Builder.CreateMemSetInline(Dest, ByteVal, Size);
3733 return RValue::get(nullptr);
3735 case Builtin::BI__builtin___memset_chk: {
3736 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3737 Expr::EvalResult SizeResult, DstSizeResult;
3738 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3739 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3740 break;
3741 llvm::APSInt Size = SizeResult.Val.getInt();
3742 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3743 if (Size.ugt(DstSize))
3744 break;
3745 Address Dest = EmitPointerWithAlignment(E->getArg(0));
3746 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3747 Builder.getInt8Ty());
3748 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3749 Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3750 return RValue::get(Dest.getPointer());
3752 case Builtin::BI__builtin_wmemchr: {
3753 // The MSVC runtime library does not provide a definition of wmemchr, so we
3754 // need an inline implementation.
3755 if (!getTarget().getTriple().isOSMSVCRT())
3756 break;
3758 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3759 Value *Str = EmitScalarExpr(E->getArg(0));
3760 Value *Chr = EmitScalarExpr(E->getArg(1));
3761 Value *Size = EmitScalarExpr(E->getArg(2));
3763 BasicBlock *Entry = Builder.GetInsertBlock();
3764 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
3765 BasicBlock *Next = createBasicBlock("wmemchr.next");
3766 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
3767 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3768 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
3770 EmitBlock(CmpEq);
3771 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
3772 StrPhi->addIncoming(Str, Entry);
3773 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3774 SizePhi->addIncoming(Size, Entry);
3775 CharUnits WCharAlign =
3776 getContext().getTypeAlignInChars(getContext().WCharTy);
3777 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
3778 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
3779 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
3780 Builder.CreateCondBr(StrEqChr, Exit, Next);
3782 EmitBlock(Next);
3783 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
3784 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3785 Value *NextSizeEq0 =
3786 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3787 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
3788 StrPhi->addIncoming(NextStr, Next);
3789 SizePhi->addIncoming(NextSize, Next);
3791 EmitBlock(Exit);
3792 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
3793 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
3794 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
3795 Ret->addIncoming(FoundChr, CmpEq);
3796 return RValue::get(Ret);
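// [Editorial sketch, not in the original source] The entry/eq/next/exit blocks
// above implement the IR form of this C loop:
//   wchar_t *wmemchr(const wchar_t *s, wchar_t c, size_t n) {
//     for (; n; --n, ++s)
//       if (*s == c) return (wchar_t *)s;
//     return 0;
//   }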
3798 case Builtin::BI__builtin_wmemcmp: {
3799 // The MSVC runtime library does not provide a definition of wmemcmp, so we
3800 // need an inline implementation.
3801 if (!getTarget().getTriple().isOSMSVCRT())
3802 break;
3804 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3806 Value *Dst = EmitScalarExpr(E->getArg(0));
3807 Value *Src = EmitScalarExpr(E->getArg(1));
3808 Value *Size = EmitScalarExpr(E->getArg(2));
3810 BasicBlock *Entry = Builder.GetInsertBlock();
3811 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3812 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3813 BasicBlock *Next = createBasicBlock("wmemcmp.next");
3814 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3815 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3816 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3818 EmitBlock(CmpGT);
3819 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3820 DstPhi->addIncoming(Dst, Entry);
3821 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3822 SrcPhi->addIncoming(Src, Entry);
3823 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3824 SizePhi->addIncoming(Size, Entry);
3825 CharUnits WCharAlign =
3826 getContext().getTypeAlignInChars(getContext().WCharTy);
3827 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3828 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3829 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3830 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3832 EmitBlock(CmpLT);
3833 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3834 Builder.CreateCondBr(DstLtSrc, Exit, Next);
3836 EmitBlock(Next);
3837 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3838 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3839 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3840 Value *NextSizeEq0 =
3841 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3842 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
3843 DstPhi->addIncoming(NextDst, Next);
3844 SrcPhi->addIncoming(NextSrc, Next);
3845 SizePhi->addIncoming(NextSize, Next);
3847 EmitBlock(Exit);
3848 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
3849 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
3850 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
3851 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
3852 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
3853 return RValue::get(Ret);
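// [Editorial sketch, not in the original source] Equivalent C for the loop
// built above; note the element comparison is unsigned, matching the icmp
// ugt/ult used in the IR:
//   int wmemcmp(const wchar_t *a, const wchar_t *b, size_t n) {
//     for (; n; --n, ++a, ++b) {
//       if (*a > *b) return 1;
//       if (*a < *b) return -1;
//     }
//     return 0;
//   }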
3855 case Builtin::BI__builtin_dwarf_cfa: {
3856 // The offset in bytes from the first argument to the CFA.
3858 // Why on earth is this in the frontend? Is there any reason at
3859 // all that the backend can't reasonably determine this while
3860 // lowering llvm.eh.dwarf.cfa()?
3862 // TODO: If there's a satisfactory reason, add a target hook for
3863 // this instead of hard-coding 0, which is correct for most targets.
3864 int32_t Offset = 0;
3866 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
3867 return RValue::get(Builder.CreateCall(F,
3868 llvm::ConstantInt::get(Int32Ty, Offset)));
3870 case Builtin::BI__builtin_return_address: {
3871 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3872 getContext().UnsignedIntTy);
3873 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3874 return RValue::get(Builder.CreateCall(F, Depth));
3876 case Builtin::BI_ReturnAddress: {
3877 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
3878 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
3880 case Builtin::BI__builtin_frame_address: {
3881 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
3882 getContext().UnsignedIntTy);
3883 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
3884 return RValue::get(Builder.CreateCall(F, Depth));
3886 case Builtin::BI__builtin_extract_return_addr: {
3887 Value *Address = EmitScalarExpr(E->getArg(0));
3888 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
3889 return RValue::get(Result);
3891 case Builtin::BI__builtin_frob_return_addr: {
3892 Value *Address = EmitScalarExpr(E->getArg(0));
3893 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
3894 return RValue::get(Result);
3896 case Builtin::BI__builtin_dwarf_sp_column: {
3897 llvm::IntegerType *Ty
3898 = cast<llvm::IntegerType>(ConvertType(E->getType()));
3899 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
3900 if (Column == -1) {
3901 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
3902 return RValue::get(llvm::UndefValue::get(Ty));
3904 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
3906 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
3907 Value *Address = EmitScalarExpr(E->getArg(0));
3908 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
3909 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
3910 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
3912 case Builtin::BI__builtin_eh_return: {
3913 Value *Int = EmitScalarExpr(E->getArg(0));
3914 Value *Ptr = EmitScalarExpr(E->getArg(1));
3916 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
3917 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
3918 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
3919 Function *F =
3920 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
3921 : Intrinsic::eh_return_i64);
3922 Builder.CreateCall(F, {Int, Ptr});
3923 Builder.CreateUnreachable();
3925 // We do need to preserve an insertion point.
3926 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
3928 return RValue::get(nullptr);
3930 case Builtin::BI__builtin_unwind_init: {
3931 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
3932 Builder.CreateCall(F);
3933 return RValue::get(nullptr);
3935 case Builtin::BI__builtin_extend_pointer: {
3936 // Extends a pointer to the size of an _Unwind_Word, which is
3937 // uint64_t on all platforms. Generally this gets poked into a
3938 // register and eventually used as an address, so if the
3939 // addressing registers are wider than pointers and the platform
3940 // doesn't implicitly ignore high-order bits when doing
3941 // addressing, we need to make sure we zext / sext based on
3942 // the platform's expectations.
3944 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
3946 // Cast the pointer to intptr_t.
3947 Value *Ptr = EmitScalarExpr(E->getArg(0));
3948 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
3950 // If that's 64 bits, we're done.
3951 if (IntPtrTy->getBitWidth() == 64)
3952 return RValue::get(Result);
3954 // Otherwise, ask the target hooks what to do.
3955 if (getTargetHooks().extendPointerWithSExt())
3956 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
3957 else
3958 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
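// [Editorial note, not in the original source] In effect, on a target with
// 32-bit pointers that does not request sign extension, this is:
//   uint64_t w = (uint64_t)(uintptr_t)p;  // ptrtoint, then zext to i64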
3960 case Builtin::BI__builtin_setjmp: {
3961 // Buffer is a void**.
3962 Address Buf = EmitPointerWithAlignment(E->getArg(0));
3964 // Store the frame pointer to the setjmp buffer.
3965 Value *FrameAddr = Builder.CreateCall(
3966 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
3967 ConstantInt::get(Int32Ty, 0));
3968 Builder.CreateStore(FrameAddr, Buf);
3970 // Store the stack pointer to the setjmp buffer.
3971 Value *StackAddr = Builder.CreateStackSave();
3972 assert(Buf.getPointer()->getType() == StackAddr->getType());
3974 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
3975 Builder.CreateStore(StackAddr, StackSaveSlot);
3977 // Call LLVM's EH setjmp, which is lightweight.
3978 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
3979 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
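// [Editorial note, not in the original source] The buffer written above is the
// conventional five-word __builtin_setjmp area: slot 0 holds the frame
// address, slot 2 the saved stack pointer, and the resume address (slot 1) is
// filled in by llvm.eh.sjlj.setjmp itself.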
3981 case Builtin::BI__builtin_longjmp: {
3982 Value *Buf = EmitScalarExpr(E->getArg(0));
3984 // Call LLVM's EH longjmp, which is lightweight.
3985 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
3987 // longjmp doesn't return; mark this as unreachable.
3988 Builder.CreateUnreachable();
3990 // We do need to preserve an insertion point.
3991 EmitBlock(createBasicBlock("longjmp.cont"));
3993 return RValue::get(nullptr);
3995 case Builtin::BI__builtin_launder: {
3996 const Expr *Arg = E->getArg(0);
3997 QualType ArgTy = Arg->getType()->getPointeeType();
3998 Value *Ptr = EmitScalarExpr(Arg);
3999 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4000 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4002 return RValue::get(Ptr);
4004 case Builtin::BI__sync_fetch_and_add:
4005 case Builtin::BI__sync_fetch_and_sub:
4006 case Builtin::BI__sync_fetch_and_or:
4007 case Builtin::BI__sync_fetch_and_and:
4008 case Builtin::BI__sync_fetch_and_xor:
4009 case Builtin::BI__sync_fetch_and_nand:
4010 case Builtin::BI__sync_add_and_fetch:
4011 case Builtin::BI__sync_sub_and_fetch:
4012 case Builtin::BI__sync_and_and_fetch:
4013 case Builtin::BI__sync_or_and_fetch:
4014 case Builtin::BI__sync_xor_and_fetch:
4015 case Builtin::BI__sync_nand_and_fetch:
4016 case Builtin::BI__sync_val_compare_and_swap:
4017 case Builtin::BI__sync_bool_compare_and_swap:
4018 case Builtin::BI__sync_lock_test_and_set:
4019 case Builtin::BI__sync_lock_release:
4020 case Builtin::BI__sync_swap:
4021 llvm_unreachable("Shouldn't make it through sema");
4022 case Builtin::BI__sync_fetch_and_add_1:
4023 case Builtin::BI__sync_fetch_and_add_2:
4024 case Builtin::BI__sync_fetch_and_add_4:
4025 case Builtin::BI__sync_fetch_and_add_8:
4026 case Builtin::BI__sync_fetch_and_add_16:
4027 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4028 case Builtin::BI__sync_fetch_and_sub_1:
4029 case Builtin::BI__sync_fetch_and_sub_2:
4030 case Builtin::BI__sync_fetch_and_sub_4:
4031 case Builtin::BI__sync_fetch_and_sub_8:
4032 case Builtin::BI__sync_fetch_and_sub_16:
4033 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4034 case Builtin::BI__sync_fetch_and_or_1:
4035 case Builtin::BI__sync_fetch_and_or_2:
4036 case Builtin::BI__sync_fetch_and_or_4:
4037 case Builtin::BI__sync_fetch_and_or_8:
4038 case Builtin::BI__sync_fetch_and_or_16:
4039 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4040 case Builtin::BI__sync_fetch_and_and_1:
4041 case Builtin::BI__sync_fetch_and_and_2:
4042 case Builtin::BI__sync_fetch_and_and_4:
4043 case Builtin::BI__sync_fetch_and_and_8:
4044 case Builtin::BI__sync_fetch_and_and_16:
4045 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4046 case Builtin::BI__sync_fetch_and_xor_1:
4047 case Builtin::BI__sync_fetch_and_xor_2:
4048 case Builtin::BI__sync_fetch_and_xor_4:
4049 case Builtin::BI__sync_fetch_and_xor_8:
4050 case Builtin::BI__sync_fetch_and_xor_16:
4051 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4052 case Builtin::BI__sync_fetch_and_nand_1:
4053 case Builtin::BI__sync_fetch_and_nand_2:
4054 case Builtin::BI__sync_fetch_and_nand_4:
4055 case Builtin::BI__sync_fetch_and_nand_8:
4056 case Builtin::BI__sync_fetch_and_nand_16:
4057 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4059 // Clang extensions: not overloaded yet.
4060 case Builtin::BI__sync_fetch_and_min:
4061 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4062 case Builtin::BI__sync_fetch_and_max:
4063 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4064 case Builtin::BI__sync_fetch_and_umin:
4065 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4066 case Builtin::BI__sync_fetch_and_umax:
4067 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4069 case Builtin::BI__sync_add_and_fetch_1:
4070 case Builtin::BI__sync_add_and_fetch_2:
4071 case Builtin::BI__sync_add_and_fetch_4:
4072 case Builtin::BI__sync_add_and_fetch_8:
4073 case Builtin::BI__sync_add_and_fetch_16:
4074 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4075 llvm::Instruction::Add);
4076 case Builtin::BI__sync_sub_and_fetch_1:
4077 case Builtin::BI__sync_sub_and_fetch_2:
4078 case Builtin::BI__sync_sub_and_fetch_4:
4079 case Builtin::BI__sync_sub_and_fetch_8:
4080 case Builtin::BI__sync_sub_and_fetch_16:
4081 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4082 llvm::Instruction::Sub);
4083 case Builtin::BI__sync_and_and_fetch_1:
4084 case Builtin::BI__sync_and_and_fetch_2:
4085 case Builtin::BI__sync_and_and_fetch_4:
4086 case Builtin::BI__sync_and_and_fetch_8:
4087 case Builtin::BI__sync_and_and_fetch_16:
4088 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4089 llvm::Instruction::And);
4090 case Builtin::BI__sync_or_and_fetch_1:
4091 case Builtin::BI__sync_or_and_fetch_2:
4092 case Builtin::BI__sync_or_and_fetch_4:
4093 case Builtin::BI__sync_or_and_fetch_8:
4094 case Builtin::BI__sync_or_and_fetch_16:
4095 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4096 llvm::Instruction::Or);
4097 case Builtin::BI__sync_xor_and_fetch_1:
4098 case Builtin::BI__sync_xor_and_fetch_2:
4099 case Builtin::BI__sync_xor_and_fetch_4:
4100 case Builtin::BI__sync_xor_and_fetch_8:
4101 case Builtin::BI__sync_xor_and_fetch_16:
4102 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4103 llvm::Instruction::Xor);
4104 case Builtin::BI__sync_nand_and_fetch_1:
4105 case Builtin::BI__sync_nand_and_fetch_2:
4106 case Builtin::BI__sync_nand_and_fetch_4:
4107 case Builtin::BI__sync_nand_and_fetch_8:
4108 case Builtin::BI__sync_nand_and_fetch_16:
4109 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4110 llvm::Instruction::And, true);
4112 case Builtin::BI__sync_val_compare_and_swap_1:
4113 case Builtin::BI__sync_val_compare_and_swap_2:
4114 case Builtin::BI__sync_val_compare_and_swap_4:
4115 case Builtin::BI__sync_val_compare_and_swap_8:
4116 case Builtin::BI__sync_val_compare_and_swap_16:
4117 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4119 case Builtin::BI__sync_bool_compare_and_swap_1:
4120 case Builtin::BI__sync_bool_compare_and_swap_2:
4121 case Builtin::BI__sync_bool_compare_and_swap_4:
4122 case Builtin::BI__sync_bool_compare_and_swap_8:
4123 case Builtin::BI__sync_bool_compare_and_swap_16:
4124 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
4126 case Builtin::BI__sync_swap_1:
4127 case Builtin::BI__sync_swap_2:
4128 case Builtin::BI__sync_swap_4:
4129 case Builtin::BI__sync_swap_8:
4130 case Builtin::BI__sync_swap_16:
4131 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4133 case Builtin::BI__sync_lock_test_and_set_1:
4134 case Builtin::BI__sync_lock_test_and_set_2:
4135 case Builtin::BI__sync_lock_test_and_set_4:
4136 case Builtin::BI__sync_lock_test_and_set_8:
4137 case Builtin::BI__sync_lock_test_and_set_16:
4138 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4140 case Builtin::BI__sync_lock_release_1:
4141 case Builtin::BI__sync_lock_release_2:
4142 case Builtin::BI__sync_lock_release_4:
4143 case Builtin::BI__sync_lock_release_8:
4144 case Builtin::BI__sync_lock_release_16: {
4145 Value *Ptr = CheckAtomicAlignment(*this, E);
4146 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
4147 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
4148 llvm::Type *ITy =
4149 llvm::IntegerType::get(getLLVMContext(), StoreSize.getQuantity() * 8);
4150 llvm::StoreInst *Store =
4151 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
4152 StoreSize);
4153 Store->setAtomic(llvm::AtomicOrdering::Release);
4154 return RValue::get(nullptr);
4157 case Builtin::BI__sync_synchronize: {
4158 // We assume this is supposed to correspond to a C++0x-style
4159 // sequentially-consistent fence (i.e. this is only usable for
4160 // synchronization, not device I/O or anything like that). This intrinsic
4161 // is really badly designed in the sense that in theory, there isn't
4162 // any way to safely use it... but in practice, it mostly works
4163 // to use it with non-atomic loads and stores to get acquire/release
4164 // semantics.
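// Illustrative sketch (not authoritative; textual IR can vary by LLVM version):
//   __sync_synchronize();
// is emitted here as the single instruction
//   fence seq_cst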
4165 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
4166 return RValue::get(nullptr);
4167 }
4169 case Builtin::BI__builtin_nontemporal_load:
4170 return RValue::get(EmitNontemporalLoad(*this, E));
4171 case Builtin::BI__builtin_nontemporal_store:
4172 return RValue::get(EmitNontemporalStore(*this, E));
4173 case Builtin::BI__c11_atomic_is_lock_free:
4174 case Builtin::BI__atomic_is_lock_free: {
4175 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
4176 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
4177 // _Atomic(T) is always properly-aligned.
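// Rough sketch of the lowering below (illustrative; assumes a 64-bit size_t):
//   __c11_atomic_is_lock_free(8)  ->  call to __atomic_is_lock_free(8, NULL)
//   __atomic_is_lock_free(8, p)   ->  call to __atomic_is_lock_free(8, p)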
4178 const char *LibCallName = "__atomic_is_lock_free";
4179 CallArgList Args;
4180 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
4181 getContext().getSizeType());
4182 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
4183 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
4184 getContext().VoidPtrTy);
4185 else
4186 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
4187 getContext().VoidPtrTy);
4188 const CGFunctionInfo &FuncInfo =
4189 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
4190 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
4191 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
4192 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
4193 ReturnValueSlot(), Args);
4194 }
4196 case Builtin::BI__atomic_test_and_set: {
4197 // Look at the argument type to determine whether this is a volatile
4198 // operation. The parameter type is always volatile.
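// Sketch of the constant-order path below (illustrative):
//   __atomic_test_and_set(p, __ATOMIC_SEQ_CST)
// becomes roughly
//   %old = atomicrmw xchg ptr %p, i8 1 seq_cst
//   %tobool = icmp ne i8 %old, 0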
4199 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
4200 bool Volatile =
4201 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
4203 Value *Ptr = EmitScalarExpr(E->getArg(0));
4204 Value *NewVal = Builder.getInt8(1);
4205 Value *Order = EmitScalarExpr(E->getArg(1));
4206 if (isa<llvm::ConstantInt>(Order)) {
4207 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4208 AtomicRMWInst *Result = nullptr;
4209 switch (ord) {
4210 case 0: // memory_order_relaxed
4211 default: // invalid order
4212 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4213 llvm::AtomicOrdering::Monotonic);
4214 break;
4215 case 1: // memory_order_consume
4216 case 2: // memory_order_acquire
4217 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4218 llvm::AtomicOrdering::Acquire);
4219 break;
4220 case 3: // memory_order_release
4221 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4222 llvm::AtomicOrdering::Release);
4223 break;
4224 case 4: // memory_order_acq_rel
4226 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4227 llvm::AtomicOrdering::AcquireRelease);
4228 break;
4229 case 5: // memory_order_seq_cst
4230 Result = Builder.CreateAtomicRMW(
4231 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
4232 llvm::AtomicOrdering::SequentiallyConsistent);
4233 break;
4234 }
4235 Result->setVolatile(Volatile);
4236 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4237 }
4239 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4241 llvm::BasicBlock *BBs[5] = {
4242 createBasicBlock("monotonic", CurFn),
4243 createBasicBlock("acquire", CurFn),
4244 createBasicBlock("release", CurFn),
4245 createBasicBlock("acqrel", CurFn),
4246 createBasicBlock("seqcst", CurFn)
4247 };
4248 llvm::AtomicOrdering Orders[5] = {
4249 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
4250 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
4251 llvm::AtomicOrdering::SequentiallyConsistent};
4253 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4254 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4256 Builder.SetInsertPoint(ContBB);
4257 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
4259 for (unsigned i = 0; i < 5; ++i) {
4260 Builder.SetInsertPoint(BBs[i]);
4261 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
4262 Ptr, NewVal, Orders[i]);
4263 RMW->setVolatile(Volatile);
4264 Result->addIncoming(RMW, BBs[i]);
4265 Builder.CreateBr(ContBB);
4266 }
4268 SI->addCase(Builder.getInt32(0), BBs[0]);
4269 SI->addCase(Builder.getInt32(1), BBs[1]);
4270 SI->addCase(Builder.getInt32(2), BBs[1]);
4271 SI->addCase(Builder.getInt32(3), BBs[2]);
4272 SI->addCase(Builder.getInt32(4), BBs[3]);
4273 SI->addCase(Builder.getInt32(5), BBs[4]);
4275 Builder.SetInsertPoint(ContBB);
4276 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
4277 }
4279 case Builtin::BI__atomic_clear: {
4280 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
4281 bool Volatile =
4282 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
4284 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
4285 Ptr = Ptr.withElementType(Int8Ty);
4286 Value *NewVal = Builder.getInt8(0);
4287 Value *Order = EmitScalarExpr(E->getArg(1));
4288 if (isa<llvm::ConstantInt>(Order)) {
4289 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4290 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4291 switch (ord) {
4292 case 0: // memory_order_relaxed
4293 default: // invalid order
4294 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
4295 break;
4296 case 3: // memory_order_release
4297 Store->setOrdering(llvm::AtomicOrdering::Release);
4298 break;
4299 case 5: // memory_order_seq_cst
4300 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
4301 break;
4302 }
4303 return RValue::get(nullptr);
4304 }
4306 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4308 llvm::BasicBlock *BBs[3] = {
4309 createBasicBlock("monotonic", CurFn),
4310 createBasicBlock("release", CurFn),
4311 createBasicBlock("seqcst", CurFn)
4312 };
4313 llvm::AtomicOrdering Orders[3] = {
4314 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
4315 llvm::AtomicOrdering::SequentiallyConsistent};
4317 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4318 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
4320 for (unsigned i = 0; i < 3; ++i) {
4321 Builder.SetInsertPoint(BBs[i]);
4322 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
4323 Store->setOrdering(Orders[i]);
4324 Builder.CreateBr(ContBB);
4325 }
4327 SI->addCase(Builder.getInt32(0), BBs[0]);
4328 SI->addCase(Builder.getInt32(3), BBs[1]);
4329 SI->addCase(Builder.getInt32(5), BBs[2]);
4331 Builder.SetInsertPoint(ContBB);
4332 return RValue::get(nullptr);
4333 }
4335 case Builtin::BI__atomic_thread_fence:
4336 case Builtin::BI__atomic_signal_fence:
4337 case Builtin::BI__c11_atomic_thread_fence:
4338 case Builtin::BI__c11_atomic_signal_fence: {
4339 llvm::SyncScope::ID SSID;
4340 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
4341 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
4342 SSID = llvm::SyncScope::SingleThread;
4343 else
4344 SSID = llvm::SyncScope::System;
4345 Value *Order = EmitScalarExpr(E->getArg(0));
4346 if (isa<llvm::ConstantInt>(Order)) {
4347 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
4348 switch (ord) {
4349 case 0: // memory_order_relaxed
4350 default: // invalid order
4351 break;
4352 case 1: // memory_order_consume
4353 case 2: // memory_order_acquire
4354 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4355 break;
4356 case 3: // memory_order_release
4357 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4358 break;
4359 case 4: // memory_order_acq_rel
4360 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4361 break;
4362 case 5: // memory_order_seq_cst
4363 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4364 break;
4365 }
4366 return RValue::get(nullptr);
4367 }
4369 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
4370 AcquireBB = createBasicBlock("acquire", CurFn);
4371 ReleaseBB = createBasicBlock("release", CurFn);
4372 AcqRelBB = createBasicBlock("acqrel", CurFn);
4373 SeqCstBB = createBasicBlock("seqcst", CurFn);
4374 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
4376 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
4377 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
4379 Builder.SetInsertPoint(AcquireBB);
4380 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
4381 Builder.CreateBr(ContBB);
4382 SI->addCase(Builder.getInt32(1), AcquireBB);
4383 SI->addCase(Builder.getInt32(2), AcquireBB);
4385 Builder.SetInsertPoint(ReleaseBB);
4386 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
4387 Builder.CreateBr(ContBB);
4388 SI->addCase(Builder.getInt32(3), ReleaseBB);
4390 Builder.SetInsertPoint(AcqRelBB);
4391 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
4392 Builder.CreateBr(ContBB);
4393 SI->addCase(Builder.getInt32(4), AcqRelBB);
4395 Builder.SetInsertPoint(SeqCstBB);
4396 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
4397 Builder.CreateBr(ContBB);
4398 SI->addCase(Builder.getInt32(5), SeqCstBB);
4400 Builder.SetInsertPoint(ContBB);
4401 return RValue::get(nullptr);
4402 }
4404 case Builtin::BI__builtin_signbit:
4405 case Builtin::BI__builtin_signbitf:
4406 case Builtin::BI__builtin_signbitl: {
4407 return RValue::get(
4408 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
4409 ConvertType(E->getType())));
4410 }
4411 case Builtin::BI__warn_memset_zero_len:
4412 return RValue::getIgnored();
4413 case Builtin::BI__annotation: {
4414 // Re-encode each wide string to UTF8 and make an MDString.
4415 SmallVector<Metadata *, 1> Strings;
4416 for (const Expr *Arg : E->arguments()) {
4417 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
4418 assert(Str->getCharByteWidth() == 2);
4419 StringRef WideBytes = Str->getBytes();
4420 std::string StrUtf8;
4421 if (!convertUTF16ToUTF8String(
4422 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
4423 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
4424 continue;
4425 }
4426 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
4427 }
4429 // Build an MDTuple of MDStrings and emit the intrinsic call.
4430 llvm::Function *F =
4431 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
4432 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
4433 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
4434 return RValue::getIgnored();
4435 }
4436 case Builtin::BI__builtin_annotation: {
4437 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
4438 llvm::Function *F =
4439 CGM.getIntrinsic(llvm::Intrinsic::annotation,
4440 {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
4442 // Get the annotation string, looking through casts. Sema requires this to
4443 // be a non-wide string literal, potentially cast, so the cast<> is safe.
4444 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
4445 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
4446 return RValue::get(
4447 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
4448 }
4449 case Builtin::BI__builtin_addcb:
4450 case Builtin::BI__builtin_addcs:
4451 case Builtin::BI__builtin_addc:
4452 case Builtin::BI__builtin_addcl:
4453 case Builtin::BI__builtin_addcll:
4454 case Builtin::BI__builtin_subcb:
4455 case Builtin::BI__builtin_subcs:
4456 case Builtin::BI__builtin_subc:
4457 case Builtin::BI__builtin_subcl:
4458 case Builtin::BI__builtin_subcll: {
4460 // We translate all of these builtins from expressions of the form:
4461 // int x = ..., y = ..., carryin = ..., carryout, result;
4462 // result = __builtin_addc(x, y, carryin, &carryout);
4464 // to LLVM IR of the form:
4466 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
4467 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
4468 // %carry1 = extractvalue {i32, i1} %tmp1, 1
4469 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
4470 // i32 %carryin)
4471 // %result = extractvalue {i32, i1} %tmp2, 0
4472 // %carry2 = extractvalue {i32, i1} %tmp2, 1
4473 // %tmp3 = or i1 %carry1, %carry2
4474 // %tmp4 = zext i1 %tmp3 to i32
4475 // store i32 %tmp4, i32* %carryout
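//
// A typical use (illustrative) chains the carry to add multi-word integers:
//   unsigned c1, c2;
//   lo = __builtin_addc(a_lo, b_lo, 0,  &c1);
//   hi = __builtin_addc(a_hi, b_hi, c1, &c2);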
4477 // Scalarize our inputs.
4478 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4479 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4480 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
4481 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
4483 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
4484 llvm::Intrinsic::ID IntrinsicId;
4485 switch (BuiltinID) {
4486 default: llvm_unreachable("Unknown multiprecision builtin id.");
4487 case Builtin::BI__builtin_addcb:
4488 case Builtin::BI__builtin_addcs:
4489 case Builtin::BI__builtin_addc:
4490 case Builtin::BI__builtin_addcl:
4491 case Builtin::BI__builtin_addcll:
4492 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4493 break;
4494 case Builtin::BI__builtin_subcb:
4495 case Builtin::BI__builtin_subcs:
4496 case Builtin::BI__builtin_subc:
4497 case Builtin::BI__builtin_subcl:
4498 case Builtin::BI__builtin_subcll:
4499 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4500 break;
4501 }
4503 // Construct our resulting LLVM IR expression.
4504 llvm::Value *Carry1;
4505 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
4506 X, Y, Carry1);
4507 llvm::Value *Carry2;
4508 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
4509 Sum1, Carryin, Carry2);
4510 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
4511 X->getType());
4512 Builder.CreateStore(CarryOut, CarryOutPtr);
4513 return RValue::get(Sum2);
4514 }
4516 case Builtin::BI__builtin_add_overflow:
4517 case Builtin::BI__builtin_sub_overflow:
4518 case Builtin::BI__builtin_mul_overflow: {
4519 const clang::Expr *LeftArg = E->getArg(0);
4520 const clang::Expr *RightArg = E->getArg(1);
4521 const clang::Expr *ResultArg = E->getArg(2);
4523 clang::QualType ResultQTy =
4524 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
4526 WidthAndSignedness LeftInfo =
4527 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
4528 WidthAndSignedness RightInfo =
4529 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
4530 WidthAndSignedness ResultInfo =
4531 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
4533 // Handle mixed-sign multiplication as a special case, because adding
4534 // runtime or backend support for our generic irgen would be too expensive.
4535 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
4536 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
4537 RightInfo, ResultArg, ResultQTy,
4538 ResultInfo);
4540 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
4541 ResultInfo))
4542 return EmitCheckedUnsignedMultiplySignedResult(
4543 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
4544 ResultInfo);
4546 WidthAndSignedness EncompassingInfo =
4547 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
4549 llvm::Type *EncompassingLLVMTy =
4550 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
4552 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
4554 llvm::Intrinsic::ID IntrinsicId;
4555 switch (BuiltinID) {
4556 default:
4557 llvm_unreachable("Unknown overflow builtin id.");
4558 case Builtin::BI__builtin_add_overflow:
4559 IntrinsicId = EncompassingInfo.Signed
4560 ? llvm::Intrinsic::sadd_with_overflow
4561 : llvm::Intrinsic::uadd_with_overflow;
4562 break;
4563 case Builtin::BI__builtin_sub_overflow:
4564 IntrinsicId = EncompassingInfo.Signed
4565 ? llvm::Intrinsic::ssub_with_overflow
4566 : llvm::Intrinsic::usub_with_overflow;
4567 break;
4568 case Builtin::BI__builtin_mul_overflow:
4569 IntrinsicId = EncompassingInfo.Signed
4570 ? llvm::Intrinsic::smul_with_overflow
4571 : llvm::Intrinsic::umul_with_overflow;
4572 break;
4573 }
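// Illustrative sketch of the generic path that follows: for
//   int a, b; short r;
//   bool ov = __builtin_add_overflow(a, b, &r);
// the encompassing type is a signed 32-bit integer, so this emits roughly
//   %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// plus a truncation of the sum to i16 and an extra check that the truncation
// itself did not overflow; the OR of both overflow flags is returned.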
4575 llvm::Value *Left = EmitScalarExpr(LeftArg);
4576 llvm::Value *Right = EmitScalarExpr(RightArg);
4577 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
4579 // Extend each operand to the encompassing type.
4580 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
4581 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
4583 // Perform the operation on the extended values.
4584 llvm::Value *Overflow, *Result;
4585 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
4587 if (EncompassingInfo.Width > ResultInfo.Width) {
4588 // The encompassing type is wider than the result type, so we need to
4589 // truncate it.
4590 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
4592 // To see if the truncation caused an overflow, we will extend
4593 // the result and then compare it to the original result.
4594 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
4595 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
4596 llvm::Value *TruncationOverflow =
4597 Builder.CreateICmpNE(Result, ResultTruncExt);
4599 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
4600 Result = ResultTrunc;
4601 }
4603 // Finally, store the result using the pointer.
4604 bool isVolatile =
4605 ResultArg->getType()->getPointeeType().isVolatileQualified();
4606 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
4608 return RValue::get(Overflow);
4609 }
4611 case Builtin::BI__builtin_uadd_overflow:
4612 case Builtin::BI__builtin_uaddl_overflow:
4613 case Builtin::BI__builtin_uaddll_overflow:
4614 case Builtin::BI__builtin_usub_overflow:
4615 case Builtin::BI__builtin_usubl_overflow:
4616 case Builtin::BI__builtin_usubll_overflow:
4617 case Builtin::BI__builtin_umul_overflow:
4618 case Builtin::BI__builtin_umull_overflow:
4619 case Builtin::BI__builtin_umulll_overflow:
4620 case Builtin::BI__builtin_sadd_overflow:
4621 case Builtin::BI__builtin_saddl_overflow:
4622 case Builtin::BI__builtin_saddll_overflow:
4623 case Builtin::BI__builtin_ssub_overflow:
4624 case Builtin::BI__builtin_ssubl_overflow:
4625 case Builtin::BI__builtin_ssubll_overflow:
4626 case Builtin::BI__builtin_smul_overflow:
4627 case Builtin::BI__builtin_smull_overflow:
4628 case Builtin::BI__builtin_smulll_overflow: {
4630 // We translate all of these builtins directly to the relevant llvm IR node.
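// e.g. (illustrative):
//   unsigned r; bool ov = __builtin_uadd_overflow(a, b, &r);
// becomes roughly
//   %t  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %r  = extractvalue { i32, i1 } %t, 0   ; stored through the pointer
//   %ov = extractvalue { i32, i1 } %t, 1   ; returned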
4632 // Scalarize our inputs.
4633 llvm::Value *X = EmitScalarExpr(E->getArg(0));
4634 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
4635 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
4637 // Decide which of the overflow intrinsics we are lowering to:
4638 llvm::Intrinsic::ID IntrinsicId;
4639 switch (BuiltinID) {
4640 default: llvm_unreachable("Unknown overflow builtin id.");
4641 case Builtin::BI__builtin_uadd_overflow:
4642 case Builtin::BI__builtin_uaddl_overflow:
4643 case Builtin::BI__builtin_uaddll_overflow:
4644 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
4645 break;
4646 case Builtin::BI__builtin_usub_overflow:
4647 case Builtin::BI__builtin_usubl_overflow:
4648 case Builtin::BI__builtin_usubll_overflow:
4649 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
4650 break;
4651 case Builtin::BI__builtin_umul_overflow:
4652 case Builtin::BI__builtin_umull_overflow:
4653 case Builtin::BI__builtin_umulll_overflow:
4654 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
4655 break;
4656 case Builtin::BI__builtin_sadd_overflow:
4657 case Builtin::BI__builtin_saddl_overflow:
4658 case Builtin::BI__builtin_saddll_overflow:
4659 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
4660 break;
4661 case Builtin::BI__builtin_ssub_overflow:
4662 case Builtin::BI__builtin_ssubl_overflow:
4663 case Builtin::BI__builtin_ssubll_overflow:
4664 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
4665 break;
4666 case Builtin::BI__builtin_smul_overflow:
4667 case Builtin::BI__builtin_smull_overflow:
4668 case Builtin::BI__builtin_smulll_overflow:
4669 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
4670 break;
4671 }
4674 llvm::Value *Carry;
4675 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
4676 Builder.CreateStore(Sum, SumOutPtr);
4678 return RValue::get(Carry);
4679 }
4680 case Builtin::BIaddressof:
4681 case Builtin::BI__addressof:
4682 case Builtin::BI__builtin_addressof:
4683 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4684 case Builtin::BI__builtin_function_start:
4685 return RValue::get(CGM.GetFunctionStart(
4686 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
4687 case Builtin::BI__builtin_operator_new:
4688 return EmitBuiltinNewDeleteCall(
4689 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
4690 case Builtin::BI__builtin_operator_delete:
4691 EmitBuiltinNewDeleteCall(
4692 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
4693 return RValue::get(nullptr);
4695 case Builtin::BI__builtin_is_aligned:
4696 return EmitBuiltinIsAligned(E);
4697 case Builtin::BI__builtin_align_up:
4698 return EmitBuiltinAlignTo(E, true);
4699 case Builtin::BI__builtin_align_down:
4700 return EmitBuiltinAlignTo(E, false);
4702 case Builtin::BI__noop:
4703 // __noop always evaluates to an integer literal zero.
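// e.g. in this path `__noop(f())` emits no call to f and simply yields 0
// (illustrative).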
4704 return RValue::get(ConstantInt::get(IntTy, 0));
4705 case Builtin::BI__builtin_call_with_static_chain: {
4706 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
4707 const Expr *Chain = E->getArg(1);
4708 return EmitCall(Call->getCallee()->getType(),
4709 EmitCallee(Call->getCallee()), Call, ReturnValue,
4710 EmitScalarExpr(Chain));
4711 }
4712 case Builtin::BI_InterlockedExchange8:
4713 case Builtin::BI_InterlockedExchange16:
4714 case Builtin::BI_InterlockedExchange:
4715 case Builtin::BI_InterlockedExchangePointer:
4716 return RValue::get(
4717 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
4718 case Builtin::BI_InterlockedCompareExchangePointer:
4719 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
4720 llvm::Type *RTy;
4721 llvm::IntegerType *IntType = IntegerType::get(
4722 getLLVMContext(), getContext().getTypeSize(E->getType()));
4724 llvm::Value *Destination = EmitScalarExpr(E->getArg(0));
4726 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
4727 RTy = Exchange->getType();
4728 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
4730 llvm::Value *Comparand =
4731 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
4733 auto Ordering =
4734 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
4735 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
4737 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
4738 Ordering, Ordering);
4739 Result->setVolatile(true);
4741 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
4742 0),
4743 RTy));
4744 }
4745 case Builtin::BI_InterlockedCompareExchange8:
4746 case Builtin::BI_InterlockedCompareExchange16:
4747 case Builtin::BI_InterlockedCompareExchange:
4748 case Builtin::BI_InterlockedCompareExchange64:
4749 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
4750 case Builtin::BI_InterlockedIncrement16:
4751 case Builtin::BI_InterlockedIncrement:
4752 return RValue::get(
4753 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
4754 case Builtin::BI_InterlockedDecrement16:
4755 case Builtin::BI_InterlockedDecrement:
4756 return RValue::get(
4757 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
4758 case Builtin::BI_InterlockedAnd8:
4759 case Builtin::BI_InterlockedAnd16:
4760 case Builtin::BI_InterlockedAnd:
4761 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
4762 case Builtin::BI_InterlockedExchangeAdd8:
4763 case Builtin::BI_InterlockedExchangeAdd16:
4764 case Builtin::BI_InterlockedExchangeAdd:
4765 return RValue::get(
4766 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
4767 case Builtin::BI_InterlockedExchangeSub8:
4768 case Builtin::BI_InterlockedExchangeSub16:
4769 case Builtin::BI_InterlockedExchangeSub:
4770 return RValue::get(
4771 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
4772 case Builtin::BI_InterlockedOr8:
4773 case Builtin::BI_InterlockedOr16:
4774 case Builtin::BI_InterlockedOr:
4775 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
4776 case Builtin::BI_InterlockedXor8:
4777 case Builtin::BI_InterlockedXor16:
4778 case Builtin::BI_InterlockedXor:
4779 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
4781 case Builtin::BI_bittest64:
4782 case Builtin::BI_bittest:
4783 case Builtin::BI_bittestandcomplement64:
4784 case Builtin::BI_bittestandcomplement:
4785 case Builtin::BI_bittestandreset64:
4786 case Builtin::BI_bittestandreset:
4787 case Builtin::BI_bittestandset64:
4788 case Builtin::BI_bittestandset:
4789 case Builtin::BI_interlockedbittestandreset:
4790 case Builtin::BI_interlockedbittestandreset64:
4791 case Builtin::BI_interlockedbittestandset64:
4792 case Builtin::BI_interlockedbittestandset:
4793 case Builtin::BI_interlockedbittestandset_acq:
4794 case Builtin::BI_interlockedbittestandset_rel:
4795 case Builtin::BI_interlockedbittestandset_nf:
4796 case Builtin::BI_interlockedbittestandreset_acq:
4797 case Builtin::BI_interlockedbittestandreset_rel:
4798 case Builtin::BI_interlockedbittestandreset_nf:
4799 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
4801 // These builtins exist to emit regular volatile loads and stores not
4802 // affected by the -fms-volatile setting.
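// Sketch (illustrative): __iso_volatile_load32(p) is emitted as a plain
// `load volatile i32`, with no extra atomic ordering even under -fms-volatile.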
4803 case Builtin::BI__iso_volatile_load8:
4804 case Builtin::BI__iso_volatile_load16:
4805 case Builtin::BI__iso_volatile_load32:
4806 case Builtin::BI__iso_volatile_load64:
4807 return RValue::get(EmitISOVolatileLoad(*this, E));
4808 case Builtin::BI__iso_volatile_store8:
4809 case Builtin::BI__iso_volatile_store16:
4810 case Builtin::BI__iso_volatile_store32:
4811 case Builtin::BI__iso_volatile_store64:
4812 return RValue::get(EmitISOVolatileStore(*this, E));
4814 case Builtin::BI__exception_code:
4815 case Builtin::BI_exception_code:
4816 return RValue::get(EmitSEHExceptionCode());
4817 case Builtin::BI__exception_info:
4818 case Builtin::BI_exception_info:
4819 return RValue::get(EmitSEHExceptionInfo());
4820 case Builtin::BI__abnormal_termination:
4821 case Builtin::BI_abnormal_termination:
4822 return RValue::get(EmitSEHAbnormalTermination());
4823 case Builtin::BI_setjmpex:
4824 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4825 E->getArg(0)->getType()->isPointerType())
4826 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4827 break;
4828 case Builtin::BI_setjmp:
4829 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
4830 E->getArg(0)->getType()->isPointerType()) {
4831 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
4832 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
4833 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
4834 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
4835 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
4836 }
4837 break;
4839 // C++ std:: builtins.
4840 case Builtin::BImove:
4841 case Builtin::BImove_if_noexcept:
4842 case Builtin::BIforward:
4843 case Builtin::BIforward_like:
4844 case Builtin::BIas_const:
4845 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
4846 case Builtin::BI__GetExceptionInfo: {
4847 if (llvm::GlobalVariable *GV =
4848 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
4849 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
4850 break;
4851 }
4853 case Builtin::BI__fastfail:
4854 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
4856 case Builtin::BI__builtin_coro_id:
4857 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
4858 case Builtin::BI__builtin_coro_promise:
4859 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
4860 case Builtin::BI__builtin_coro_resume:
4861 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
4862 return RValue::get(nullptr);
4863 case Builtin::BI__builtin_coro_frame:
4864 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
4865 case Builtin::BI__builtin_coro_noop:
4866 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
4867 case Builtin::BI__builtin_coro_free:
4868 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
4869 case Builtin::BI__builtin_coro_destroy:
4870 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
4871 return RValue::get(nullptr);
4872 case Builtin::BI__builtin_coro_done:
4873 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
4874 case Builtin::BI__builtin_coro_alloc:
4875 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
4876 case Builtin::BI__builtin_coro_begin:
4877 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
4878 case Builtin::BI__builtin_coro_end:
4879 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
4880 case Builtin::BI__builtin_coro_suspend:
4881 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
4882 case Builtin::BI__builtin_coro_size:
4883 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
4884 case Builtin::BI__builtin_coro_align:
4885 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
4887 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
4888 case Builtin::BIread_pipe:
4889 case Builtin::BIwrite_pipe: {
4890 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4891 *Arg1 = EmitScalarExpr(E->getArg(1));
4892 CGOpenCLRuntime OpenCLRT(CGM);
4893 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4894 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4896 // Type of the generic packet parameter.
4897 unsigned GenericAS =
4898 getContext().getTargetAddressSpace(LangAS::opencl_generic);
4899 llvm::Type *I8PTy = llvm::PointerType::get(
4900 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
4902 // Testing which overloaded version we should generate the call for.
4903 if (2U == E->getNumArgs()) {
4904 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
4905 : "__write_pipe_2";
4906 // Creating a generic function type to be able to call with any builtin or
4907 // user defined type.
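// Sketch (illustrative): read_pipe(p, &val) is lowered to a call
//   __read_pipe_2(p, (generic i8 *)&val, packet size, packet align)
// that returns an i32 status.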
4908 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
4909 llvm::FunctionType *FTy = llvm::FunctionType::get(
4910 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4911 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
4912 return RValue::get(
4913 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4914 {Arg0, BCast, PacketSize, PacketAlign}));
4915 } else {
4916 assert(4 == E->getNumArgs() &&
4917 "Illegal number of parameters to pipe function");
4918 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
4919 : "__write_pipe_4";
4921 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
4922 Int32Ty, Int32Ty};
4923 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
4924 *Arg3 = EmitScalarExpr(E->getArg(3));
4925 llvm::FunctionType *FTy = llvm::FunctionType::get(
4926 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4927 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
4928 // We know the third argument is an integer type, but we may need to cast
4929 // it to i32.
4930 if (Arg2->getType() != Int32Ty)
4931 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
4932 return RValue::get(
4933 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4934 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
4935 }
4936 }
4937 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
4938 // functions
4939 case Builtin::BIreserve_read_pipe:
4940 case Builtin::BIreserve_write_pipe:
4941 case Builtin::BIwork_group_reserve_read_pipe:
4942 case Builtin::BIwork_group_reserve_write_pipe:
4943 case Builtin::BIsub_group_reserve_read_pipe:
4944 case Builtin::BIsub_group_reserve_write_pipe: {
4945 // Composing the mangled name for the function.
4946 const char *Name;
4947 if (BuiltinID == Builtin::BIreserve_read_pipe)
4948 Name = "__reserve_read_pipe";
4949 else if (BuiltinID == Builtin::BIreserve_write_pipe)
4950 Name = "__reserve_write_pipe";
4951 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
4952 Name = "__work_group_reserve_read_pipe";
4953 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
4954 Name = "__work_group_reserve_write_pipe";
4955 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
4956 Name = "__sub_group_reserve_read_pipe";
4957 else
4958 Name = "__sub_group_reserve_write_pipe";
4960 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
4961 *Arg1 = EmitScalarExpr(E->getArg(1));
4962 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
4963 CGOpenCLRuntime OpenCLRT(CGM);
4964 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
4965 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
4967 // Building the generic function prototype.
4968 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
4969 llvm::FunctionType *FTy = llvm::FunctionType::get(
4970 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4971 // We know the second argument is an integer type, but we may need to cast
4972 // it to i32.
4973 if (Arg1->getType() != Int32Ty)
4974 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
4975 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
4976 {Arg0, Arg1, PacketSize, PacketAlign}));
4977 }
4978 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
4979 // functions
4980 case Builtin::BIcommit_read_pipe:
4981 case Builtin::BIcommit_write_pipe:
4982 case Builtin::BIwork_group_commit_read_pipe:
4983 case Builtin::BIwork_group_commit_write_pipe:
4984 case Builtin::BIsub_group_commit_read_pipe:
4985 case Builtin::BIsub_group_commit_write_pipe: {
4986 const char *Name;
4987 if (BuiltinID == Builtin::BIcommit_read_pipe)
4988 Name = "__commit_read_pipe";
4989 else if (BuiltinID == Builtin::BIcommit_write_pipe)
4990 Name = "__commit_write_pipe";
4991 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
4992 Name = "__work_group_commit_read_pipe";
4993 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
4994 Name = "__work_group_commit_write_pipe";
4995 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
4996 Name = "__sub_group_commit_read_pipe";
4997 else
4998 Name = "__sub_group_commit_write_pipe";
5000 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5001 *Arg1 = EmitScalarExpr(E->getArg(1));
5002 CGOpenCLRuntime OpenCLRT(CGM);
5003 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5004 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5006 // Building the generic function prototype.
5007 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5008 llvm::FunctionType *FTy =
5009 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
5010 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5012 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5013 {Arg0, Arg1, PacketSize, PacketAlign}));
5014 }
5015 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5016 case Builtin::BIget_pipe_num_packets:
5017 case Builtin::BIget_pipe_max_packets: {
5018 const char *BaseName;
5019 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5020 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5021 BaseName = "__get_pipe_num_packets";
5022 else
5023 BaseName = "__get_pipe_max_packets";
5024 std::string Name = std::string(BaseName) +
5025 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5027 // Building the generic function prototype.
5028 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5029 CGOpenCLRuntime OpenCLRT(CGM);
5030 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5031 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5032 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5033 llvm::FunctionType *FTy = llvm::FunctionType::get(
5034 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5036 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5037 {Arg0, PacketSize, PacketAlign}));
5038 }
5040 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5041 case Builtin::BIto_global:
5042 case Builtin::BIto_local:
5043 case Builtin::BIto_private: {
5044 auto Arg0 = EmitScalarExpr(E->getArg(0));
5045 auto NewArgT = llvm::PointerType::get(Int8Ty,
5046 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5047 auto NewRetT = llvm::PointerType::get(Int8Ty,
5048 CGM.getContext().getTargetAddressSpace(
5049 E->getType()->getPointeeType().getAddressSpace()));
5050 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5051 llvm::Value *NewArg;
5052 if (Arg0->getType()->getPointerAddressSpace() !=
5053 NewArgT->getPointerAddressSpace())
5054 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5055 else
5056 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5057 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5058 auto NewCall =
5059 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5060 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5061 ConvertType(E->getType())));
5062 }
5064 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5065 // It contains four different overload formats specified in Table 6.13.17.1.
5066 case Builtin::BIenqueue_kernel: {
5067 StringRef Name; // Generated function call name
5068 unsigned NumArgs = E->getNumArgs();
5070 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
5071 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5072 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5074 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
5075 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
5076 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
5077 llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
5078 llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
5080 if (NumArgs == 4) {
5081 // The most basic form of the call with parameters:
5082 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
5083 Name = "__enqueue_kernel_basic";
5084 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
5085 GenericVoidPtrTy};
5086 llvm::FunctionType *FTy = llvm::FunctionType::get(
5087 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5089 auto Info =
5090 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5091 llvm::Value *Kernel =
5092 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5093 llvm::Value *Block =
5094 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5096 AttrBuilder B(Builder.getContext());
5097 B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
5098 llvm::AttributeList ByValAttrSet =
5099 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
5101 auto RTCall =
5102 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
5103 {Queue, Flags, Range, Kernel, Block});
5104 RTCall->setAttributes(ByValAttrSet);
5105 return RValue::get(RTCall);
5106 }
5107 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
5109 // Create a temporary array to hold the sizes of local pointer arguments
5110 // for the block. \p First is the position of the first size argument.
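// E.g. (illustrative) for
//   enqueue_kernel(q, flags, ndr, ^(local int *a, local float *b){ ... }, 16, 64)
// First is 4 and the temporary array holds the two size_t values {16, 64}.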
5111 auto CreateArrayForSizeVar = [=](unsigned First)
5112 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
5113 llvm::APInt ArraySize(32, NumArgs - First);
5114 QualType SizeArrayTy = getContext().getConstantArrayType(
5115 getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
5116 /*IndexTypeQuals=*/0);
5117 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
5118 llvm::Value *TmpPtr = Tmp.getPointer();
5119 llvm::Value *TmpSize = EmitLifetimeStart(
5120 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
5121 llvm::Value *ElemPtr;
5122 // Each of the following arguments specifies the size of the corresponding
5123 // argument passed to the enqueued block.
5124 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
5125 for (unsigned I = First; I < NumArgs; ++I) {
5126 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
5127 auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
5128 {Zero, Index});
5129 if (I == First)
5130 ElemPtr = GEP;
5131 auto *V =
5132 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
5133 Builder.CreateAlignedStore(
5134 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
5135 }
5136 return std::tie(ElemPtr, TmpSize, TmpPtr);
5137 };
5139 // Could have events and/or varargs.
5140 if (E->getArg(3)->getType()->isBlockPointerType()) {
5141 // No events passed, but has variadic arguments.
5142 Name = "__enqueue_kernel_varargs";
5143 auto Info =
5144 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5145 llvm::Value *Kernel =
5146 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5147 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5148 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5149 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
5151 // Create a vector of the arguments, as well as a constant value to
5152 // express to the runtime the number of variadic arguments.
5153 llvm::Value *const Args[] = {Queue, Flags,
5154 Range, Kernel,
5155 Block, ConstantInt::get(IntTy, NumArgs - 4),
5156 ElemPtr};
5157 llvm::Type *const ArgTys[] = {
5158 QueueTy, IntTy, RangeTy, GenericVoidPtrTy,
5159 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
5161 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5162 auto Call = RValue::get(
5163 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
5164 if (TmpSize)
5165 EmitLifetimeEnd(TmpSize, TmpPtr);
5166 return Call;
5167 }
5168 // From here on, the remaining call forms pass event arguments.
5169 if (NumArgs >= 7) {
5170 llvm::PointerType *PtrTy = llvm::PointerType::get(
5171 CGM.getLLVMContext(),
5172 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5174 llvm::Value *NumEvents =
5175 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
5177 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth arguments
5178 // to be a null pointer constant (including a `0` literal), we can take that
5179 // into account and emit a null pointer directly.
5180 llvm::Value *EventWaitList = nullptr;
5181 if (E->getArg(4)->isNullPointerConstant(
5182 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
5183 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
5184 } else {
5185 EventWaitList = E->getArg(4)->getType()->isArrayType()
5186 ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
5187 : EmitScalarExpr(E->getArg(4));
5188 // Convert to generic address space.
5189 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
5190 }
5191 llvm::Value *EventRet = nullptr;
5192 if (E->getArg(5)->isNullPointerConstant(
5193 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
5194 EventRet = llvm::ConstantPointerNull::get(PtrTy);
5195 } else {
5196 EventRet =
5197 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
5198 }
5200 auto Info =
5201 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
5202 llvm::Value *Kernel =
5203 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5204 llvm::Value *Block =
5205 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5207 std::vector<llvm::Type *> ArgTys = {
5208 QueueTy, Int32Ty, RangeTy, Int32Ty,
5209 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
5211 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
5212 NumEvents, EventWaitList, EventRet,
5213 Kernel, Block};
5215 if (NumArgs == 7) {
5216 // Has events but no variadics.
5217 Name = "__enqueue_kernel_basic_events";
5218 llvm::FunctionType *FTy = llvm::FunctionType::get(
5219 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5220 return RValue::get(
5221 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5222 llvm::ArrayRef<llvm::Value *>(Args)));
5223 }
5224 // Has event info and variadics
5225 // Pass the number of variadics to the runtime function too.
5226 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
5227 ArgTys.push_back(Int32Ty);
5228 Name = "__enqueue_kernel_events_varargs";
5230 llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5231 std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
5232 Args.push_back(ElemPtr);
5233 ArgTys.push_back(ElemPtr->getType());
5235 llvm::FunctionType *FTy = llvm::FunctionType::get(
5236 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5237 auto Call =
5238 RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5239 llvm::ArrayRef<llvm::Value *>(Args)));
5240 if (TmpSize)
5241 EmitLifetimeEnd(TmpSize, TmpPtr);
5242 return Call;
5243 }
5244 [[fallthrough]];
5245 }
5246 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
5247 // parameter.
5248 case Builtin::BIget_kernel_work_group_size: {
5249 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5250 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5251 auto Info =
5252 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5253 Value *Kernel =
5254 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5255 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5256 return RValue::get(EmitRuntimeCall(
5257 CGM.CreateRuntimeFunction(
5258 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5259 false),
5260 "__get_kernel_work_group_size_impl"),
5261 {Kernel, Arg}));
5262 }
5263 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
5264 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5265 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5266 auto Info =
5267 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5268 Value *Kernel =
5269 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5270 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5271 return RValue::get(EmitRuntimeCall(
5272 CGM.CreateRuntimeFunction(
5273 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5274 false),
5275 "__get_kernel_preferred_work_group_size_multiple_impl"),
5276 {Kernel, Arg}));
5277 }
5278 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
5279 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
5280 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5281 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5282 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
5283 llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
5284 auto Info =
5285 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
5286 Value *Kernel =
5287 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5288 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5289 const char *Name =
5290 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
5291 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
5292 : "__get_kernel_sub_group_count_for_ndrange_impl";
5293 return RValue::get(EmitRuntimeCall(
5294 CGM.CreateRuntimeFunction(
5295 llvm::FunctionType::get(
5296 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
5297 false),
5298 Name),
5299 {NDRange, Kernel, Block}));
5300 }
5302 case Builtin::BI__builtin_store_half:
5303 case Builtin::BI__builtin_store_halff: {
5304 Value *Val = EmitScalarExpr(E->getArg(0));
5305 Address Address = EmitPointerWithAlignment(E->getArg(1));
5306 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
5307 Builder.CreateStore(HalfVal, Address);
5308 return RValue::get(nullptr);
5309 }
5310 case Builtin::BI__builtin_load_half: {
5311 Address Address = EmitPointerWithAlignment(E->getArg(0));
5312 Value *HalfVal = Builder.CreateLoad(Address);
5313 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
5314 }
5315 case Builtin::BI__builtin_load_halff: {
5316 Address Address = EmitPointerWithAlignment(E->getArg(0));
5317 Value *HalfVal = Builder.CreateLoad(Address);
5318 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
5319 }
5320 case Builtin::BIprintf:
5321 if (getTarget().getTriple().isNVPTX() ||
5322 getTarget().getTriple().isAMDGCN()) {
5323 if (getLangOpts().OpenMPIsTargetDevice)
5324 return EmitOpenMPDevicePrintfCallExpr(E);
5325 if (getTarget().getTriple().isNVPTX())
5326 return EmitNVPTXDevicePrintfCallExpr(E);
5327 if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
5328 return EmitAMDGPUDevicePrintfCallExpr(E);
5329 }
5331 break;
5332 case Builtin::BI__builtin_canonicalize:
5333 case Builtin::BI__builtin_canonicalizef:
5334 case Builtin::BI__builtin_canonicalizef16:
5335 case Builtin::BI__builtin_canonicalizel:
5336 return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
5338 case Builtin::BI__builtin_thread_pointer: {
5339 if (!getContext().getTargetInfo().isTLSSupported())
5340 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
5341 // Fall through - it's already mapped to the intrinsic by ClangBuiltin.
5342 break;
5343 }
5344 case Builtin::BI__builtin_os_log_format:
5345 return emitBuiltinOSLogFormat(*E);
5347 case Builtin::BI__xray_customevent: {
5348 if (!ShouldXRayInstrumentFunction())
5349 return RValue::getIgnored();
5351 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5352 XRayInstrKind::Custom))
5353 return RValue::getIgnored();
5355 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5356 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
5357 return RValue::getIgnored();
5359 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
5360 auto FTy = F->getFunctionType();
5361 auto Arg0 = E->getArg(0);
5362 auto Arg0Val = EmitScalarExpr(Arg0);
5363 auto Arg0Ty = Arg0->getType();
5364 auto PTy0 = FTy->getParamType(0);
5365 if (PTy0 != Arg0Val->getType()) {
5366 if (Arg0Ty->isArrayType())
5367 Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
5368 else
5369 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
5370 }
5371 auto Arg1 = EmitScalarExpr(E->getArg(1));
5372 auto PTy1 = FTy->getParamType(1);
5373 if (PTy1 != Arg1->getType())
5374 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
5375 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
5376 }
5378 case Builtin::BI__xray_typedevent: {
5379 // TODO: There should be a way to always emit events even if the current
5380 // function is not instrumented. Losing events in a stream can cripple
5381 // a trace.
5382 if (!ShouldXRayInstrumentFunction())
5383 return RValue::getIgnored();
5385 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5386 XRayInstrKind::Typed))
5387 return RValue::getIgnored();
5389 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5390 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
5391 return RValue::getIgnored();
5393 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
5394 auto FTy = F->getFunctionType();
5395 auto Arg0 = EmitScalarExpr(E->getArg(0));
5396 auto PTy0 = FTy->getParamType(0);
5397 if (PTy0 != Arg0->getType())
5398 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
5399 auto Arg1 = E->getArg(1);
5400 auto Arg1Val = EmitScalarExpr(Arg1);
5401 auto Arg1Ty = Arg1->getType();
5402 auto PTy1 = FTy->getParamType(1);
5403 if (PTy1 != Arg1Val->getType()) {
5404 if (Arg1Ty->isArrayType())
5405 Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
5406 else
5407 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
5408 }
5409 auto Arg2 = EmitScalarExpr(E->getArg(2));
5410 auto PTy2 = FTy->getParamType(2);
5411 if (PTy2 != Arg2->getType())
5412 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
5413 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
5414 }
5416 case Builtin::BI__builtin_ms_va_start:
5417 case Builtin::BI__builtin_ms_va_end:
5418 return RValue::get(
5419 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
5420 BuiltinID == Builtin::BI__builtin_ms_va_start));
5422 case Builtin::BI__builtin_ms_va_copy: {
5423 // Lower this manually. We can't reliably determine whether or not any
5424 // given va_copy() is for a Win64 va_list from the calling convention
5425 // alone, because it's legal to do this from a System V ABI function.
5426 // With opaque pointer types, we won't have enough information in LLVM
5427 // IR to determine this from the argument types, either. Best to do it
5428 // now, while we have enough information.
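// Sketch of what is emitted below (illustrative):
//   %ap.val = load ptr, ptr %ap
//   store ptr %ap.val, ptr %cp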
5429 Address DestAddr = EmitMSVAListRef(E->getArg(0));
5430 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
5432 llvm::Type *BPP = Int8PtrPtrTy;
5434 DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
5435 Int8PtrTy, DestAddr.getAlignment());
5436 SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
5437 Int8PtrTy, SrcAddr.getAlignment());
5439 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
5440 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
5441 }
5443 case Builtin::BI__builtin_get_device_side_mangled_name: {
5444 auto Name = CGM.getCUDARuntime().getDeviceSideName(
5445 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
5446 auto Str = CGM.GetAddrOfConstantCString(Name, "");
5447 llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
5448 llvm::ConstantInt::get(SizeTy, 0)};
5449 auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
5450 Str.getPointer(), Zeros);
5451 return RValue::get(Ptr);
5452 }
5453 }
5455 // If this is an alias for a lib function (e.g. __builtin_sin), emit
5456 // the call using the normal call path, but using the unmangled
5457 // version of the function name.
5458 if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
5459 return emitLibraryCall(*this, FD, E,
5460 CGM.getBuiltinLibFunction(FD, BuiltinID));
5462 // If this is a predefined lib function (e.g. malloc), emit the call
5463 // using exactly the normal call path.
5464 if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
5465 return emitLibraryCall(*this, FD, E,
5466 cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
5468 // Check that a call to a target specific builtin has the correct target
5469 // features.
5470 // This is down here so the check is skipped for non-target-specific
5471 // builtins; however, if generic builtins start to require generic target
5472 // features then we can move this up to the beginning of the function.
5473 checkTargetFeatures(E, FD);
5475 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
5476 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
5478 // See if we have a target specific intrinsic.
5479 StringRef Name = getContext().BuiltinInfo.getName(BuiltinID);
5480 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
5481 StringRef Prefix =
5482 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
5483 if (!Prefix.empty()) {
5484 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
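// e.g. on x86 targets Prefix is "x86", so a Clang builtin can map directly to
// the corresponding llvm.x86.* intrinsic declared with ClangBuiltin
// (illustrative).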
5485 // NOTE: we don't need to perform a compatibility flag check here since the
5486 // MS builtins are declared in Builtins*.def via LANGBUILTIN with
5487 // ALL_MS_LANGUAGES and are therefore filtered out earlier.
5488 if (IntrinsicID == Intrinsic::not_intrinsic)
5489 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
5490 }
5492 if (IntrinsicID != Intrinsic::not_intrinsic) {
5493 SmallVector<Value*, 16> Args;
5495 // Find out if any arguments are required to be integer constant
5496 // expressions.
5497 unsigned ICEArguments = 0;
5498 ASTContext::GetBuiltinTypeError Error;
5499 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5500 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5502 Function *F = CGM.getIntrinsic(IntrinsicID);
5503 llvm::FunctionType *FTy = F->getFunctionType();
5505 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5506 Value *ArgValue;
5507 // If this is a normal argument, just emit it as a scalar.
5508 if ((ICEArguments & (1 << i)) == 0) {
5509 ArgValue = EmitScalarExpr(E->getArg(i));
5510 } else {
5511 // If this is required to be a constant, constant fold it so that we
5512 // know that the generated intrinsic gets a ConstantInt.
5513 ArgValue = llvm::ConstantInt::get(
5514 getLLVMContext(),
5515 *E->getArg(i)->getIntegerConstantExpr(getContext()));
5516 }
5518 // If the intrinsic arg type is different from the builtin arg type
5519 // we need to do a bit cast.
5520 llvm::Type *PTy = FTy->getParamType(i);
5521 if (PTy != ArgValue->getType()) {
5522 // XXX - vector of pointers?
5523 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5524 if (PtrTy->getAddressSpace() !=
5525 ArgValue->getType()->getPointerAddressSpace()) {
5526 ArgValue = Builder.CreateAddrSpaceCast(
5527 ArgValue, llvm::PointerType::get(getLLVMContext(),
5528 PtrTy->getAddressSpace()));
5529 }
5530 }
5532 assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
5533 "Must be able to losslessly bit cast to param");
5534 // Cast vector types (e.g., v256i32) to x86_amx; this only happens
5535 // in AMX intrinsics.
5536 if (PTy->isX86_AMXTy())
5537 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
5538 {ArgValue->getType()}, {ArgValue});
5539 else
5540 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5541 }
5543 Args.push_back(ArgValue);
5544 }
5546 Value *V = Builder.CreateCall(F, Args);
5547 QualType BuiltinRetType = E->getType();
5549 llvm::Type *RetTy = VoidTy;
5550 if (!BuiltinRetType->isVoidType())
5551 RetTy = ConvertType(BuiltinRetType);
5553 if (RetTy != V->getType()) {
5554 // XXX - vector of pointers?
5555 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5556 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5557 V = Builder.CreateAddrSpaceCast(
5558 V, llvm::PointerType::get(getLLVMContext(),
5559 PtrTy->getAddressSpace()));
5560 }
5561 }
5563 assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5564 "Must be able to losslessly bit cast result type");
5565 // Cast x86_amx to vector type (e.g., v256i32); this only happens
5566 // in AMX intrinsics.
5567 if (V->getType()->isX86_AMXTy())
5568 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
5569 {V});
5570 else
5571 V = Builder.CreateBitCast(V, RetTy);
5572 }
5574 if (RetTy->isVoidTy())
5575 return RValue::get(nullptr);
5577 return RValue::get(V);
5578 }
5580 // Some target-specific builtins can have aggregate return values, e.g.
5581 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5582 // ReturnValue to be non-null, so that the target-specific emission code can
5583 // always just emit into it.
5584 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5585 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5586 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5587 ReturnValue = ReturnValueSlot(DestPtr, false);
5588 }
5590 // Now see if we can emit a target-specific builtin.
5591 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5592 switch (EvalKind) {
5593 case TEK_Scalar:
5594 if (V->getType()->isVoidTy())
5595 return RValue::get(nullptr);
5596 return RValue::get(V);
5597 case TEK_Aggregate:
5598 return RValue::getAggregate(ReturnValue.getValue(),
5599 ReturnValue.isVolatile());
5600 case TEK_Complex:
5601 llvm_unreachable("No current target builtin returns complex");
5602 }
5603 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5604 }
5606 ErrorUnsupported(E, "builtin function");
5608 // Unknown builtin, for now just dump it out and return undef.
5609 return GetUndefRValue(E->getType());
5610 }
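// Illustrative sketch of the mapping consulted above (names are hypothetical,
// not taken from Intrinsics*.td): a target intrinsic declared as
//
//   def int_xyz_foo : ClangBuiltin<"__builtin_xyz_foo">,
//                     Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
//
// is what getIntrinsicForClangBuiltin("xyz", "__builtin_xyz_foo") resolves,
// and the generic path above then emits a direct call to llvm.xyz.foo, using
// the bitcast/addrspacecast fixups to reconcile any type mismatches between
// the builtin signature and the intrinsic signature.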
5612 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5613 unsigned BuiltinID, const CallExpr *E,
5614 ReturnValueSlot ReturnValue,
5615 llvm::Triple::ArchType Arch) {
5616 switch (Arch) {
5617 case llvm::Triple::arm:
5618 case llvm::Triple::armeb:
5619 case llvm::Triple::thumb:
5620 case llvm::Triple::thumbeb:
5621 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5622 case llvm::Triple::aarch64:
5623 case llvm::Triple::aarch64_32:
5624 case llvm::Triple::aarch64_be:
5625 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5626 case llvm::Triple::bpfeb:
5627 case llvm::Triple::bpfel:
5628 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5629 case llvm::Triple::x86:
5630 case llvm::Triple::x86_64:
5631 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5632 case llvm::Triple::ppc:
5633 case llvm::Triple::ppcle:
5634 case llvm::Triple::ppc64:
5635 case llvm::Triple::ppc64le:
5636 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5637 case llvm::Triple::r600:
5638 case llvm::Triple::amdgcn:
5639 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5640 case llvm::Triple::systemz:
5641 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5642 case llvm::Triple::nvptx:
5643 case llvm::Triple::nvptx64:
5644 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5645 case llvm::Triple::wasm32:
5646 case llvm::Triple::wasm64:
5647 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5648 case llvm::Triple::hexagon:
5649 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5650 case llvm::Triple::riscv32:
5651 case llvm::Triple::riscv64:
5652 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
5653 default:
5654 return nullptr;
5655 }
5656 }
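// Adding a new architecture to this dispatch follows the same shape as the
// cases above; a sketch for a hypothetical target "foo" (not a real triple):
//
//   case llvm::Triple::foo:
//     return CGF->EmitFooBuiltinExpr(BuiltinID, E);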
5658 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5659 const CallExpr *E,
5660 ReturnValueSlot ReturnValue) {
5661 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5662 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5663 return EmitTargetArchBuiltinExpr(
5664 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5665 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5666 }
5668 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5669 getTarget().getTriple().getArch());
5670 }
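// Example of the aux-builtin path (assuming a CUDA/HIP-style compilation with
// an auxiliary target): when nvptx64 device code is compiled with an x86_64
// host as the aux target, a builtin belonging to the host target shows up here
// as an "aux" builtin, so it is mapped back with getAuxBuiltinID() and
// dispatched with Arch == llvm::Triple::x86_64 instead of the device triple.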
5672 static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5673 NeonTypeFlags TypeFlags,
5674 bool HasLegalHalfType = true,
5675 bool V1Ty = false,
5676 bool AllowBFloatArgsAndRet = true) {
5677 int IsQuad = TypeFlags.isQuad();
5678 switch (TypeFlags.getEltType()) {
5679 case NeonTypeFlags::Int8:
5680 case NeonTypeFlags::Poly8:
5681 return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5682 case NeonTypeFlags::Int16:
5683 case NeonTypeFlags::Poly16:
5684 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5685 case NeonTypeFlags::BFloat16:
5686 if (AllowBFloatArgsAndRet)
5687 return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5688 else
5689 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5690 case NeonTypeFlags::Float16:
5691 if (HasLegalHalfType)
5692 return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5693 else
5694 return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5695 case NeonTypeFlags::Int32:
5696 return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5697 case NeonTypeFlags::Int64:
5698 case NeonTypeFlags::Poly64:
5699 return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5700 case NeonTypeFlags::Poly128:
5701 // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
5702 // much of the i128 and f128 API is missing,
5703 // so we use v16i8 to represent poly128 and rely on pattern matching.
5704 return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5705 case NeonTypeFlags::Float32:
5706 return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5707 case NeonTypeFlags::Float64:
5708 return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5709 }
5710 llvm_unreachable("Unknown vector element type!");
5711 }
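// Worked examples of the flag-to-type mapping above (read off the switch):
//   Int8,    !quad -> <8 x i8>       Int8,    quad -> <16 x i8>
//   Int32,   !quad -> <2 x i32>      Int32,   quad -> <4 x i32>
//   Float32, !quad -> <2 x float>    Float64, quad -> <2 x double>
// With V1Ty set, every case collapses to a one-element vector of the same
// element type.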
5713 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5714 NeonTypeFlags IntTypeFlags) {
5715 int IsQuad = IntTypeFlags.isQuad();
5716 switch (IntTypeFlags.getEltType()) {
5717 case NeonTypeFlags::Int16:
5718 return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5719 case NeonTypeFlags::Int32:
5720 return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5721 case NeonTypeFlags::Int64:
5722 return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5723 default:
5724 llvm_unreachable("Type can't be converted to floating-point!");
5725 }
5726 }
5728 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5729 const ElementCount &Count) {
5730 Value *SV = llvm::ConstantVector::getSplat(Count, C);
5731 return Builder.CreateShuffleVector(V, V, SV, "lane");
5732 }
5734 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5735 ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5736 return EmitNeonSplat(V, C, EC);
5737 }
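// Usage sketch (values illustrative): splatting lane 2 of a <4 x i32> value
// builds the constant mask <2, 2, 2, 2> via ConstantVector::getSplat and
// shuffles the input with itself, roughly
//   EmitNeonSplat(V, Builder.getInt32(2)); // -> shufflevector %V, %V, <2,2,2,2>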
5739 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5740 const char *name,
5741 unsigned shift, bool rightshift) {
5742 unsigned j = 0;
5743 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5744 ai != ae; ++ai, ++j) {
5745 if (F->isConstrainedFPIntrinsic())
5746 if (ai->getType()->isMetadataTy())
5747 continue;
5748 if (shift > 0 && shift == j)
5749 Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5750 else
5751 Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5752 }
5754 if (F->isConstrainedFPIntrinsic())
5755 return Builder.CreateConstrainedFPCall(F, Ops, name);
5756 else
5757 return Builder.CreateCall(F, Ops, name);
5758 }
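// Typical call pattern in the NEON emitters below (sketch; Int, Ty and Ops
// stand in for the usual locals of those emitters):
//   Function *F = CGM.getIntrinsic(Int, Ty);
//   return EmitNeonCall(F, Ops, "vmax");
// Each operand is bitcast to the intrinsic's parameter type first; when
// `shift` names an operand index, that operand is instead rebuilt as a
// constant shift vector (negated when `rightshift` is set).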
5760 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5761 bool neg) {
5762 int SV = cast<ConstantInt>(V)->getSExtValue();
5763 return ConstantInt::get(Ty, neg ? -SV : SV);
5764 }
5766 // Right-shift a vector by a constant.
5767 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5768 llvm::Type *Ty, bool usgn,
5769 const char *name) {
5770 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5772 int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5773 int EltSize = VTy->getScalarSizeInBits();
5775 Vec = Builder.CreateBitCast(Vec, Ty);
5777 // lshr/ashr are undefined when the shift amount is equal to the vector
5778 // element size.
5779 if (ShiftAmt == EltSize) {
5780 if (usgn) {
5781 // Right-shifting an unsigned value by its size yields 0.
5782 return llvm::ConstantAggregateZero::get(VTy);
5783 } else {
5784 // Right-shifting a signed value by its size is equivalent
5785 // to a shift of size-1.
5786 --ShiftAmt;
5787 Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5788 }
5789 }
5791 Shift = EmitNeonShiftVector(Shift, Ty, false);
5792 if (usgn)
5793 return Builder.CreateLShr(Vec, Shift, name);
5794 else
5795 return Builder.CreateAShr(Vec, Shift, name);
5796 }
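// Worked example for a <4 x i32> vector with an immediate shift of 32 (equal
// to the element width): the unsigned form folds to an all-zero vector, and
// the signed form is rewritten as an arithmetic shift by 31, so no
// out-of-range (undefined) IR shift is ever emitted.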
5798 enum {
5799 AddRetType = (1 << 0),
5800 Add1ArgType = (1 << 1),
5801 Add2ArgTypes = (1 << 2),
5803 VectorizeRetType = (1 << 3),
5804 VectorizeArgTypes = (1 << 4),
5806 InventFloatType = (1 << 5),
5807 UnsignedAlts = (1 << 6),
5809 Use64BitVectors = (1 << 7),
5810 Use128BitVectors = (1 << 8),
5812 Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5813 VectorRet = AddRetType | VectorizeRetType,
5814 VectorRetGetArgs01 =
5815 AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5816 FpCmpzModifiers =
5817 AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5818 };
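// Sketch of how the modifier bits are intended to compose (inferred from the
// composite values above): an entry tagged VectorRetGetArgs01 asks the
// table-driven emitter to instantiate the overloaded intrinsic with the
// vectorized return type plus the (vectorized) types of its first two
// arguments, while FpCmpzModifiers additionally invents a floating-point
// argument type of matching width for the compare-against-zero forms.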
5820 namespace {
5821 struct ARMVectorIntrinsicInfo {
5822 const char *NameHint;
5823 unsigned BuiltinID;
5824 unsigned LLVMIntrinsic;
5825 unsigned AltLLVMIntrinsic;
5826 uint64_t TypeModifier;
5828 bool operator<(unsigned RHSBuiltinID) const {
5829 return BuiltinID < RHSBuiltinID;
5830 }
5831 bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5832 return BuiltinID < TE.BuiltinID;
5833 }
5834 };
5835 } // end anonymous namespace
5837 #define NEONMAP0(NameBase) \
5838 { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5840 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5841 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5842 Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5844 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5845 { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \
5846 Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5847 TypeModifier }
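// Expansion example (mechanical application of the macros above):
//   NEONMAP1(vabs_v, arm_neon_vabs, 0)
// becomes
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. NameHint, BuiltinID, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier.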
5849 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
5850 NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
5851 NEONMAP0(splat_lane_v),
5852 NEONMAP0(splat_laneq_v),
5853 NEONMAP0(splatq_lane_v),
5854 NEONMAP0(splatq_laneq_v),
5855 NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5856 NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5857 NEONMAP1(vabs_v, arm_neon_vabs, 0),
5858 NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5859 NEONMAP0(vadd_v),
5860 NEONMAP0(vaddhn_v),
5861 NEONMAP0(vaddq_v),
5862 NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
5863 NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
5864 NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
5865 NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
5866 NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
5867 NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
5868 NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
5869 NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
5870 NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
5871 NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5872 NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5873 NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
5874 NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
5875 NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
5876 NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
5877 NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
5878 NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
5879 NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType),
5880 NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
5881 NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
5882 NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType),
5883 NEONMAP1(vcage_v, arm_neon_vacge, 0),
5884 NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5885 NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5886 NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5887 NEONMAP1(vcale_v, arm_neon_vacge, 0),
5888 NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5889 NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5890 NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5891 NEONMAP0(vceqz_v),
5892 NEONMAP0(vceqzq_v),
5893 NEONMAP0(vcgez_v),
5894 NEONMAP0(vcgezq_v),
5895 NEONMAP0(vcgtz_v),
5896 NEONMAP0(vcgtzq_v),
5897 NEONMAP0(vclez_v),
5898 NEONMAP0(vclezq_v),
5899 NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5900 NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5901 NEONMAP0(vcltz_v),
5902 NEONMAP0(vcltzq_v),
5903 NEONMAP1(vclz_v, ctlz, Add1ArgType),
5904 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5905 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5906 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5907 NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5908 NEONMAP0(vcvt_f16_s16),
5909 NEONMAP0(vcvt_f16_u16),
5910 NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5911 NEONMAP0(vcvt_f32_v),
5912 NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
5913 NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
5914 NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5915 NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
5916 NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5917 NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5918 NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
5919 NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5920 NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5921 NEONMAP0(vcvt_s16_f16),
5922 NEONMAP0(vcvt_s32_v),
5923 NEONMAP0(vcvt_s64_v),
5924 NEONMAP0(vcvt_u16_f16),
5925 NEONMAP0(vcvt_u32_v),
5926 NEONMAP0(vcvt_u64_v),
5927 NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
5928 NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5929 NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5930 NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
5931 NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5932 NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5933 NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
5934 NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5935 NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5936 NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
5937 NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5938 NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5939 NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
5940 NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
5941 NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
5942 NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
5943 NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
5944 NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
5945 NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
5946 NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
5947 NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
5948 NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
5949 NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
5950 NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
5951 NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
5952 NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
5953 NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
5954 NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
5955 NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
5956 NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
5957 NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
5958 NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
5959 NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
5960 NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
5961 NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
5962 NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
5963 NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
5964 NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
5965 NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
5966 NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
5967 NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
5968 NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
5969 NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
5970 NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
5971 NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
5972 NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
5973 NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
5974 NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
5975 NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
5976 NEONMAP0(vcvtq_f16_s16),
5977 NEONMAP0(vcvtq_f16_u16),
5978 NEONMAP0(vcvtq_f32_v),
5979 NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
5980 NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
5981 NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5982 NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
5983 NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5984 NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5985 NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
5986 NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5987 NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5988 NEONMAP0(vcvtq_s16_f16),
5989 NEONMAP0(vcvtq_s32_v),
5990 NEONMAP0(vcvtq_s64_v),
5991 NEONMAP0(vcvtq_u16_f16),
5992 NEONMAP0(vcvtq_u32_v),
5993 NEONMAP0(vcvtq_u64_v),
5994 NEONMAP1(vdot_s32, arm_neon_sdot, 0),
5995 NEONMAP1(vdot_u32, arm_neon_udot, 0),
5996 NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
5997 NEONMAP1(vdotq_u32, arm_neon_udot, 0),
5998 NEONMAP0(vext_v),
5999 NEONMAP0(vextq_v),
6000 NEONMAP0(vfma_v),
6001 NEONMAP0(vfmaq_v),
6002 NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
6003 NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
6004 NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
6005 NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
6006 NEONMAP0(vld1_dup_v),
6007 NEONMAP1(vld1_v, arm_neon_vld1, 0),
6008 NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
6009 NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
6010 NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
6011 NEONMAP0(vld1q_dup_v),
6012 NEONMAP1(vld1q_v, arm_neon_vld1, 0),
6013 NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
6014 NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
6015 NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
6016 NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
6017 NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
6018 NEONMAP1(vld2_v, arm_neon_vld2, 0),
6019 NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
6020 NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
6021 NEONMAP1(vld2q_v, arm_neon_vld2, 0),
6022 NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
6023 NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
6024 NEONMAP1(vld3_v, arm_neon_vld3, 0),
6025 NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
6026 NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
6027 NEONMAP1(vld3q_v, arm_neon_vld3, 0),
6028 NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
6029 NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
6030 NEONMAP1(vld4_v, arm_neon_vld4, 0),
6031 NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
6032 NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
6033 NEONMAP1(vld4q_v, arm_neon_vld4, 0),
6034 NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
6035 NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
6036 NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
6037 NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
6038 NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
6039 NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
6040 NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
6041 NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
6042 NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
6043 NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
6044 NEONMAP0(vmovl_v),
6045 NEONMAP0(vmovn_v),
6046 NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
6047 NEONMAP0(vmull_v),
6048 NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
6049 NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
6050 NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
6051 NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
6052 NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
6053 NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
6054 NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
6055 NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
6056 NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
6057 NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
6058 NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
6059 NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
6060 NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
6061 NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
6062 NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
6063 NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
6064 NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
6065 NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
6066 NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
6067 NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
6068 NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
6069 NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
6070 NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType),
6071 NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType),
6072 NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType),
6073 NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType),
6074 NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType),
6075 NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType),
6076 NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType),
6077 NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType),
6078 NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
6079 NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
6080 NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
6081 NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
6082 NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
6083 NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
6084 NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
6085 NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
6086 NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
6087 NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
6088 NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
6089 NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
6090 NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
6091 NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
6092 NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
6093 NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
6094 NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
6095 NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
6096 NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
6097 NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
6098 NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
6099 NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
6100 NEONMAP0(vrndi_v),
6101 NEONMAP0(vrndiq_v),
6102 NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
6103 NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
6104 NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
6105 NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
6106 NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
6107 NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
6108 NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
6109 NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
6110 NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
6111 NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
6112 NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
6113 NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
6114 NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
6115 NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
6116 NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
6117 NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
6118 NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
6119 NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
6120 NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
6121 NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
6122 NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
6123 NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
6124 NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
6125 NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
6126 NEONMAP0(vshl_n_v),
6127 NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
6128 NEONMAP0(vshll_n_v),
6129 NEONMAP0(vshlq_n_v),
6130 NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
6131 NEONMAP0(vshr_n_v),
6132 NEONMAP0(vshrn_n_v),
6133 NEONMAP0(vshrq_n_v),
6134 NEONMAP1(vst1_v, arm_neon_vst1, 0),
6135 NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
6136 NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
6137 NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
6138 NEONMAP1(vst1q_v, arm_neon_vst1, 0),
6139 NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
6140 NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
6141 NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
6142 NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
6143 NEONMAP1(vst2_v, arm_neon_vst2, 0),
6144 NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
6145 NEONMAP1(vst2q_v, arm_neon_vst2, 0),
6146 NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
6147 NEONMAP1(vst3_v, arm_neon_vst3, 0),
6148 NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
6149 NEONMAP1(vst3q_v, arm_neon_vst3, 0),
6150 NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
6151 NEONMAP1(vst4_v, arm_neon_vst4, 0),
6152 NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
6153 NEONMAP1(vst4q_v, arm_neon_vst4, 0),
6154 NEONMAP0(vsubhn_v),
6155 NEONMAP0(vtrn_v),
6156 NEONMAP0(vtrnq_v),
6157 NEONMAP0(vtst_v),
6158 NEONMAP0(vtstq_v),
6159 NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
6160 NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
6161 NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
6162 NEONMAP0(vuzp_v),
6163 NEONMAP0(vuzpq_v),
6164 NEONMAP0(vzip_v),
6165 NEONMAP0(vzipq_v)
6166 };
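// Note: these tables must stay sorted by BuiltinID; the operator< overloads on
// ARMVectorIntrinsicInfo exist so the lookup helper can binary-search them,
// conceptually along the lines of (sketch only, not the actual helper):
//   const ARMVectorIntrinsicInfo *Info =
//       llvm::lower_bound(ARMSIMDIntrinsicMap, BuiltinID);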
6168 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
6169 NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0),
6170 NEONMAP0(splat_lane_v),
6171 NEONMAP0(splat_laneq_v),
6172 NEONMAP0(splatq_lane_v),
6173 NEONMAP0(splatq_laneq_v),
6174 NEONMAP1(vabs_v, aarch64_neon_abs, 0),
6175 NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
6176 NEONMAP0(vadd_v),
6177 NEONMAP0(vaddhn_v),
6178 NEONMAP0(vaddq_p128),
6179 NEONMAP0(vaddq_v),
6180 NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
6181 NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
6182 NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
6183 NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
6184 NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6185 NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6186 NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6187 NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6188 NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6189 NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6190 NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6191 NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
6192 NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
6193 NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
6194 NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
6195 NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
6196 NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
6197 NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
6198 NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
6199 NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
6200 NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
6201 NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
6202 NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
6203 NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType),
6204 NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
6205 NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
6206 NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType),
6207 NEONMAP1(vcage_v, aarch64_neon_facge, 0),
6208 NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
6209 NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
6210 NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
6211 NEONMAP1(vcale_v, aarch64_neon_facge, 0),
6212 NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
6213 NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
6214 NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
6215 NEONMAP0(vceqz_v),
6216 NEONMAP0(vceqzq_v),
6217 NEONMAP0(vcgez_v),
6218 NEONMAP0(vcgezq_v),
6219 NEONMAP0(vcgtz_v),
6220 NEONMAP0(vcgtzq_v),
6221 NEONMAP0(vclez_v),
6222 NEONMAP0(vclezq_v),
6223 NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
6224 NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
6225 NEONMAP0(vcltz_v),
6226 NEONMAP0(vcltzq_v),
6227 NEONMAP1(vclz_v, ctlz, Add1ArgType),
6228 NEONMAP1(vclzq_v, ctlz, Add1ArgType),
6229 NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
6230 NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
6231 NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
6232 NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
6233 NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
6234 NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
6235 NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
6236 NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
6237 NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
6238 NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
6239 NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType),
6240 NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
6241 NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
6242 NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType),
6243 NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
6244 NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
6245 NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType),
6246 NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
6247 NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
6248 NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType),
6249 NEONMAP1(vcnt_v, ctpop, Add1ArgType),
6250 NEONMAP1(vcntq_v, ctpop, Add1ArgType),
6251 NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
6252 NEONMAP0(vcvt_f16_s16),
6253 NEONMAP0(vcvt_f16_u16),
6254 NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
6255 NEONMAP0(vcvt_f32_v),
6256 NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
6257 NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
6258 NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6259 NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6260 NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
6261 NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
6262 NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
6263 NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
6264 NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
6265 NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
6266 NEONMAP0(vcvtq_f16_s16),
6267 NEONMAP0(vcvtq_f16_u16),
6268 NEONMAP0(vcvtq_f32_v),
6269 NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0),
6270 NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
6271 NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
6272 NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6273 NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
6274 NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
6275 NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
6276 NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
6277 NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
6278 NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
6279 NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
6280 NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
6281 NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
6282 NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
6283 NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
6284 NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
6285 NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6286 NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6287 NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6288 NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6289 NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6290 NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6291 NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6292 NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6293 NEONMAP0(vext_v),
6294 NEONMAP0(vextq_v),
6295 NEONMAP0(vfma_v),
6296 NEONMAP0(vfmaq_v),
6297 NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
6298 NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
6299 NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
6300 NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
6301 NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
6302 NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
6303 NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
6304 NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
6305 NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6306 NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6307 NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6308 NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6309 NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
6310 NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
6311 NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
6312 NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
6313 NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
6314 NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
6315 NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
6316 NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
6317 NEONMAP0(vmovl_v),
6318 NEONMAP0(vmovn_v),
6319 NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
6320 NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
6321 NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
6322 NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6323 NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6324 NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
6325 NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
6326 NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
6327 NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6328 NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6329 NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
6330 NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
6331 NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
6332 NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6333 NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
6334 NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
6335 NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6336 NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
6337 NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
6338 NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
6339 NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
6340 NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
6341 NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
6342 NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType),
6343 NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6344 NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType),
6345 NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6346 NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
6347 NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6348 NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
6349 NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6350 NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6351 NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6352 NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
6353 NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6354 NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6355 NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
6356 NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6357 NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6358 NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6359 NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6360 NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6361 NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6362 NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
6363 NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
6364 NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6365 NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6366 NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
6367 NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
6368 NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6369 NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6370 NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
6371 NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
6372 NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6373 NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6374 NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
6375 NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
6376 NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
6377 NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
6378 NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
6379 NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
6380 NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
6381 NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
6382 NEONMAP0(vrndi_v),
6383 NEONMAP0(vrndiq_v),
6384 NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6385 NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6386 NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6387 NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6388 NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6389 NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6390 NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
6391 NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
6392 NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
6393 NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
6394 NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
6395 NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
6396 NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
6397 NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
6398 NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
6399 NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
6400 NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
6401 NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
6402 NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
6403 NEONMAP0(vshl_n_v),
6404 NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6405 NEONMAP0(vshll_n_v),
6406 NEONMAP0(vshlq_n_v),
6407 NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6408 NEONMAP0(vshr_n_v),
6409 NEONMAP0(vshrn_n_v),
6410 NEONMAP0(vshrq_n_v),
6411 NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
6412 NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
6413 NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
6414 NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
6415 NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
6416 NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
6417 NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
6418 NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
6419 NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
6420 NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
6421 NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
6422 NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
6423 NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
6424 NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
6425 NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
6426 NEONMAP0(vsubhn_v),
6427 NEONMAP0(vtst_v),
6428 NEONMAP0(vtstq_v),
6429 NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
6430 NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
6431 NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
6432 NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
6433 };
6435 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
6436 NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
6437 NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
6438 NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
6439 NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6440 NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6441 NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6442 NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6443 NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6444 NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6445 NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6446 NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6447 NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
6448 NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6449 NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
6450 NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6451 NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6452 NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6453 NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6454 NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6455 NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6456 NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6457 NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6458 NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6459 NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6460 NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6461 NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6462 NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6463 NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6464 NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6465 NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6466 NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6467 NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6468 NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6469 NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6470 NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
6471 NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6472 NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6473 NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6474 NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6475 NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6476 NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6477 NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6478 NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6479 NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6480 NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6481 NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6482 NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6483 NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6484 NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6485 NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6486 NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6487 NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6488 NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6489 NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
6490 NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6491 NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6492 NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6493 NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6494 NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6495 NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6496 NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6497 NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6498 NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6499 NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6500 NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6501 NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6502 NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6503 NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6504 NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6505 NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6506 NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6507 NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6508 NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6509 NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6510 NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
6511 NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
6512 NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
6513 NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6514 NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6515 NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6516 NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6517 NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6518 NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6519 NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6520 NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6521 NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6522 NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6523 NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6524 NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
6525 NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6526 NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
6527 NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6528 NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6529 NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
6530 NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
6531 NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6532 NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6533 NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
6534 NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
6535 NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
6536 NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
6537 NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
6538 NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
6539 NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
6540 NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
6541 NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6542 NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6543 NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6544 NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6545 NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
6546 NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6547 NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6548 NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6549 NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
6550 NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6551 NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
6552 NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
6553 NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
6554 NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
6555 NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
6556 NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
6557 NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
6558 NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6559 NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6560 NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
6561 NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
6562 NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6563 NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6564 NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
6565 NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
6566 NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
6567 NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
6568 NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6569 NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6570 NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6571 NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6572 NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
6573 NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6574 NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6575 NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6576 NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6577 NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6578 NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6579 NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
6580 NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
6581 NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6582 NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6583 NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6584 NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6585 NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
6586 NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
6587 NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
6588 NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
6589 NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6590 NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6591 NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
6592 NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
6593 NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
6594 NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6595 NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6596 NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6597 NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6598 NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
6599 NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6600 NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6601 NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6602 NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6603 NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
6604 NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
6605 NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6606 NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6607 NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
6608 NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
6609 NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
6610 NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
6611 NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
6612 NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
6613 NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
6614 NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
6615 NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
6616 NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
6617 NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
6618 NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
6619 NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
6620 NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
6621 NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
6622 NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
6623 NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
6624 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
6625 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
6626 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
6627 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6628 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
6629 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6630 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
6631 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
6632 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
6633 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6634 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
6635 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6636 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
6637 // FP16 scalar intrinsics go here.
6638 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
6639 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6640 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6641 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6642 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6643 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6644 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6645 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6646 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6647 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6648 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6649 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6650 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6651 NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6652 NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6653 NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6654 NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6655 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6656 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6657 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6658 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6659 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6660 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6661 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6662 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6663 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6664 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6665 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6666 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6667 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
6668 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
6669 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
6670 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
6671 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
6674 // Some intrinsics are equivalent for codegen.
6675 static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
6676 { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, },
6677 { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, },
6678 { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, },
6679 { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, },
6680 { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, },
6681 { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
6682 { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
6683 { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
6684 { NEON::BI__builtin_neon_vbsl_f16, NEON::BI__builtin_neon_vbsl_v, },
6685 { NEON::BI__builtin_neon_vbslq_f16, NEON::BI__builtin_neon_vbslq_v, },
6686 { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
6687 { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
6688 { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
6689 { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, },
6690 { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, },
6691 { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, },
6692 { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, },
6693 { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, },
6694 { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, },
6695 { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, },
6696 { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, },
6697 { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, },
6698 { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, },
6699 { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, },
6700 { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, },
6701 { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
6702 { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
6703 { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
6704 { NEON::BI__builtin_neon_vext_f16, NEON::BI__builtin_neon_vext_v, },
6705 { NEON::BI__builtin_neon_vextq_f16, NEON::BI__builtin_neon_vextq_v, },
6706 { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
6707 { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
6708 { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
6709 { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, },
6710 { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, },
6711 { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, },
6712 { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v },
6713 { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v },
6714 { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v },
6715 { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v },
6716 { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v },
6717 { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v },
6718 { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v },
6719 { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v },
6720 { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v },
6721 { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v },
6722 { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v },
6723 { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v },
6724 { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v },
6725 { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v },
6726 { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v },
6727 { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v },
6728 { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v },
6729 { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v },
6730 { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v },
6731 { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v },
6732 { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v },
6733 { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v },
6734 { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v },
6735 { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v },
6736 { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v },
6737 { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v },
6738 { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v },
6739 { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v },
6740 { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v },
6741 { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v },
6742 { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, },
6743 { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, },
6744 { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, },
6745 { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, },
6746 { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, },
6747 { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, },
6748 { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, },
6749 { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, },
6750 { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, },
6751 { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, },
6752 { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, },
6753 { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, },
6754 { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, },
6755 { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, },
6756 { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, },
6757 { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, },
6758 { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, },
6759 { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, },
6760 { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, },
6761 { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, },
6762 { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, },
6763 { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, },
6764 { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, },
6765 { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, },
6766 { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, },
6767 { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, },
6768 { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, },
6769 { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, },
6770 { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, },
6771 { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, },
6772 { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, },
6773 { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, },
6774 { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, },
6775 { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, },
6776 { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, },
6777 { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, },
6778 { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, },
6779 { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, },
6780 { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, },
6781 { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, },
6782 { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, },
6783 { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, },
6784 { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, },
6785 { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, },
6786 { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v },
6787 { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v },
6788 { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v },
6789 { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v },
6790 { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v },
6791 { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v },
6792 { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v },
6793 { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v },
6794 { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v },
6795 { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v },
6796 { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v },
6797 { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v },
6798 { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v },
6799 { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v },
6800 { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v },
6801 { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v },
6802 { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v },
6803 { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v },
6804 { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v },
6805 { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
6806 { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
6807 { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
6808 { NEON::BI__builtin_neon_vtrn_f16, NEON::BI__builtin_neon_vtrn_v, },
6809 { NEON::BI__builtin_neon_vtrnq_f16, NEON::BI__builtin_neon_vtrnq_v, },
6810 { NEON::BI__builtin_neon_vuzp_f16, NEON::BI__builtin_neon_vuzp_v, },
6811 { NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
6812 { NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
6813 { NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
6814 // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
6815 // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
6816 // arbitrary one to be handled as the canonical variation.
6817 { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
6818 { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
6819 { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
6820 { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
6821 { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
6822 { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
6823 { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
6824 { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
6825 { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
6826 { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
6827 { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
6828 { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
6831 #undef NEONMAP0
6832 #undef NEONMAP1
6833 #undef NEONMAP2
6835 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6837 #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
6838 TypeModifier \
6841 #define SVEMAP2(NameBase, TypeModifier) \
6842 { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
6843 static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
6844 #define GET_SVE_LLVM_INTRINSIC_MAP
6845 #include "clang/Basic/arm_sve_builtin_cg.inc"
6846 #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
6847 #undef GET_SVE_LLVM_INTRINSIC_MAP
6850 #undef SVEMAP1
6851 #undef SVEMAP2
6853 #define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
6855 #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
6856 TypeModifier \
6859 #define SMEMAP2(NameBase, TypeModifier) \
6860 { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier }
6861 static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
6862 #define GET_SME_LLVM_INTRINSIC_MAP
6863 #include "clang/Basic/arm_sme_builtin_cg.inc"
6864 #undef GET_SME_LLVM_INTRINSIC_MAP
6867 #undef SMEMAP1
6868 #undef SMEMAP2
6870 static bool NEONSIMDIntrinsicsProvenSorted = false;
6872 static bool AArch64SIMDIntrinsicsProvenSorted = false;
6873 static bool AArch64SISDIntrinsicsProvenSorted = false;
6874 static bool AArch64SVEIntrinsicsProvenSorted = false;
6875 static bool AArch64SMEIntrinsicsProvenSorted = false;
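// Binary-search one of the sorted intrinsic tables above for a builtin ID.
// In asserts builds the first lookup into each table also verifies, once per
// table via the corresponding *ProvenSorted flag, that the table really is
// sorted by BuiltinID.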
6877 static const ARMVectorIntrinsicInfo *
6878 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
6879 unsigned BuiltinID, bool &MapProvenSorted) {
6881 #ifndef NDEBUG
6882 if (!MapProvenSorted) {
6883 assert(llvm::is_sorted(IntrinsicMap));
6884 MapProvenSorted = true;
6886 #endif
6888 const ARMVectorIntrinsicInfo *Builtin =
6889 llvm::lower_bound(IntrinsicMap, BuiltinID);
6891 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
6892 return Builtin;
6894 return nullptr;
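// Declare the LLVM intrinsic backing a NEON builtin, assembling its overload
// type list from the table's Modifier flags: optionally the (possibly
// vectorized) return type, one or two copies of the argument type, and a
// float type when InventFloatType is set. Use64BitVectors/Use128BitVectors
// select the vector width used when a scalar type is promoted to a vector.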
6897 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
6898 unsigned Modifier,
6899 llvm::Type *ArgType,
6900 const CallExpr *E) {
6901 int VectorSize = 0;
6902 if (Modifier & Use64BitVectors)
6903 VectorSize = 64;
6904 else if (Modifier & Use128BitVectors)
6905 VectorSize = 128;
6907 // Return type.
6908 SmallVector<llvm::Type *, 3> Tys;
6909 if (Modifier & AddRetType) {
6910 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
6911 if (Modifier & VectorizeRetType)
6912 Ty = llvm::FixedVectorType::get(
6913 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
6915 Tys.push_back(Ty);
6918 // Arguments.
6919 if (Modifier & VectorizeArgTypes) {
6920 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
6921 ArgType = llvm::FixedVectorType::get(ArgType, Elts);
6924 if (Modifier & (Add1ArgType | Add2ArgTypes))
6925 Tys.push_back(ArgType);
6927 if (Modifier & Add2ArgTypes)
6928 Tys.push_back(ArgType);
6930 if (Modifier & InventFloatType)
6931 Tys.push_back(FloatTy);
6933 return CGM.getIntrinsic(IntrinsicID, Tys);
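// Emit a scalar (SISD) NEON builtin through the table-driven path: the
// "cmle/cmlt"-style comparisons are emitted as the opposite comparison with
// swapped operands, scalar operands are widened to one-element vectors where
// the intrinsic expects vectors, and the result is extracted or bitcast back
// to the scalar type the builtin returns.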
6936 static Value *EmitCommonNeonSISDBuiltinExpr(
6937 CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
6938 SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
6939 unsigned BuiltinID = SISDInfo.BuiltinID;
6940 unsigned int Int = SISDInfo.LLVMIntrinsic;
6941 unsigned Modifier = SISDInfo.TypeModifier;
6942 const char *s = SISDInfo.NameHint;
6944 switch (BuiltinID) {
6945 case NEON::BI__builtin_neon_vcled_s64:
6946 case NEON::BI__builtin_neon_vcled_u64:
6947 case NEON::BI__builtin_neon_vcles_f32:
6948 case NEON::BI__builtin_neon_vcled_f64:
6949 case NEON::BI__builtin_neon_vcltd_s64:
6950 case NEON::BI__builtin_neon_vcltd_u64:
6951 case NEON::BI__builtin_neon_vclts_f32:
6952 case NEON::BI__builtin_neon_vcltd_f64:
6953 case NEON::BI__builtin_neon_vcales_f32:
6954 case NEON::BI__builtin_neon_vcaled_f64:
6955 case NEON::BI__builtin_neon_vcalts_f32:
6956 case NEON::BI__builtin_neon_vcaltd_f64:
6957 // Only one direction of comparisons actually exists; cmle is actually a cmge
6958 // with swapped operands. The table gives us the right intrinsic, but we
6959 // still need to do the swap.
6960 std::swap(Ops[0], Ops[1]);
6961 break;
6964 assert(Int && "Generic code assumes a valid intrinsic");
6966 // Determine the type(s) of this overloaded AArch64 intrinsic.
6967 const Expr *Arg = E->getArg(0);
6968 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
6969 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
6971 int j = 0;
6972 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
6973 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
6974 ai != ae; ++ai, ++j) {
6975 llvm::Type *ArgTy = ai->getType();
6976 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
6977 ArgTy->getPrimitiveSizeInBits())
6978 continue;
6980 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6981 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
6982 // it before inserting.
6983 Ops[j] = CGF.Builder.CreateTruncOrBitCast(
6984 Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
6985 Ops[j] =
6986 CGF.Builder.CreateInsertElement(PoisonValue::get(ArgTy), Ops[j], C0);
6989 Value *Result = CGF.EmitNeonCall(F, Ops, s);
6990 llvm::Type *ResultType = CGF.ConvertType(E->getType());
6991 if (ResultType->getPrimitiveSizeInBits().getFixedValue() <
6992 Result->getType()->getPrimitiveSizeInBits().getFixedValue())
6993 return CGF.Builder.CreateExtractElement(Result, C0);
6995 return CGF.Builder.CreateBitCast(Result, ResultType, s);
6998 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
6999 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
7000 const char *NameHint, unsigned Modifier, const CallExpr *E,
7001 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
7002 llvm::Triple::ArchType Arch) {
7003 // Get the last argument, which specifies the vector type.
7004 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7005 std::optional<llvm::APSInt> NeonTypeConst =
7006 Arg->getIntegerConstantExpr(getContext());
7007 if (!NeonTypeConst)
7008 return nullptr;
7010 // Determine the type of this overloaded NEON intrinsic.
7011 NeonTypeFlags Type(NeonTypeConst->getZExtValue());
7012 bool Usgn = Type.isUnsigned();
7013 bool Quad = Type.isQuad();
7014 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
7015 const bool AllowBFloatArgsAndRet =
7016 getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
7018 llvm::FixedVectorType *VTy =
7019 GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
7020 llvm::Type *Ty = VTy;
7021 if (!Ty)
7022 return nullptr;
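// Helper returning an address's alignment as an i32 constant, the form in
// which the AArch32 NEON load/store intrinsics take their alignment operand.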
7024 auto getAlignmentValue32 = [&](Address addr) -> Value* {
7025 return Builder.getInt32(addr.getAlignment().getQuantity());
7028 unsigned Int = LLVMIntrinsic;
7029 if ((Modifier & UnsignedAlts) && !Usgn)
7030 Int = AltLLVMIntrinsic;
7032 switch (BuiltinID) {
7033 default: break;
7034 case NEON::BI__builtin_neon_splat_lane_v:
7035 case NEON::BI__builtin_neon_splat_laneq_v:
7036 case NEON::BI__builtin_neon_splatq_lane_v:
7037 case NEON::BI__builtin_neon_splatq_laneq_v: {
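// splatq_lane doubles the element count and splat_laneq halves it, so the
// splat emitted below always has the shape of the builtin's return type.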
7038 auto NumElements = VTy->getElementCount();
7039 if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
7040 NumElements = NumElements * 2;
7041 if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
7042 NumElements = NumElements.divideCoefficientBy(2);
7044 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7045 return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
7047 case NEON::BI__builtin_neon_vpadd_v:
7048 case NEON::BI__builtin_neon_vpaddq_v:
7049 // We don't allow fp/int overloading of intrinsics.
7050 if (VTy->getElementType()->isFloatingPointTy() &&
7051 Int == Intrinsic::aarch64_neon_addp)
7052 Int = Intrinsic::aarch64_neon_faddp;
7053 break;
7054 case NEON::BI__builtin_neon_vabs_v:
7055 case NEON::BI__builtin_neon_vabsq_v:
7056 if (VTy->getElementType()->isFloatingPointTy())
7057 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
7058 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
7059 case NEON::BI__builtin_neon_vadd_v:
7060 case NEON::BI__builtin_neon_vaddq_v: {
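// vadd on polynomial types is a carry-less addition, which is just an XOR of
// the operands viewed as byte vectors.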
7061 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
7062 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
7063 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
7064 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
7065 return Builder.CreateBitCast(Ops[0], Ty);
7067 case NEON::BI__builtin_neon_vaddhn_v: {
7068 llvm::FixedVectorType *SrcTy =
7069 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7071 // %sum = add <4 x i32> %lhs, %rhs
7072 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7073 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7074 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
7076 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
7077 Constant *ShiftAmt =
7078 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7079 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
7081 // %res = trunc <4 x i32> %high to <4 x i16>
7082 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
7084 case NEON::BI__builtin_neon_vcale_v:
7085 case NEON::BI__builtin_neon_vcaleq_v:
7086 case NEON::BI__builtin_neon_vcalt_v:
7087 case NEON::BI__builtin_neon_vcaltq_v:
7088 std::swap(Ops[0], Ops[1]);
7089 [[fallthrough]];
7090 case NEON::BI__builtin_neon_vcage_v:
7091 case NEON::BI__builtin_neon_vcageq_v:
7092 case NEON::BI__builtin_neon_vcagt_v:
7093 case NEON::BI__builtin_neon_vcagtq_v: {
7094 llvm::Type *Ty;
7095 switch (VTy->getScalarSizeInBits()) {
7096 default: llvm_unreachable("unexpected type");
7097 case 32:
7098 Ty = FloatTy;
7099 break;
7100 case 64:
7101 Ty = DoubleTy;
7102 break;
7103 case 16:
7104 Ty = HalfTy;
7105 break;
7107 auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
7108 llvm::Type *Tys[] = { VTy, VecFlt };
7109 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7110 return EmitNeonCall(F, Ops, NameHint);
7112 case NEON::BI__builtin_neon_vceqz_v:
7113 case NEON::BI__builtin_neon_vceqzq_v:
7114 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
7115 ICmpInst::ICMP_EQ, "vceqz");
7116 case NEON::BI__builtin_neon_vcgez_v:
7117 case NEON::BI__builtin_neon_vcgezq_v:
7118 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
7119 ICmpInst::ICMP_SGE, "vcgez");
7120 case NEON::BI__builtin_neon_vclez_v:
7121 case NEON::BI__builtin_neon_vclezq_v:
7122 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
7123 ICmpInst::ICMP_SLE, "vclez");
7124 case NEON::BI__builtin_neon_vcgtz_v:
7125 case NEON::BI__builtin_neon_vcgtzq_v:
7126 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
7127 ICmpInst::ICMP_SGT, "vcgtz");
7128 case NEON::BI__builtin_neon_vcltz_v:
7129 case NEON::BI__builtin_neon_vcltzq_v:
7130 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
7131 ICmpInst::ICMP_SLT, "vcltz");
7132 case NEON::BI__builtin_neon_vclz_v:
7133 case NEON::BI__builtin_neon_vclzq_v:
7134 // We generate a target-independent intrinsic, which needs a second argument
7135 // indicating whether or not clz of zero is undefined; on ARM it isn't.
7136 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
7137 break;
7138 case NEON::BI__builtin_neon_vcvt_f32_v:
7139 case NEON::BI__builtin_neon_vcvtq_f32_v:
7140 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7141 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
7142 HasLegalHalfType);
7143 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7144 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7145 case NEON::BI__builtin_neon_vcvt_f16_s16:
7146 case NEON::BI__builtin_neon_vcvt_f16_u16:
7147 case NEON::BI__builtin_neon_vcvtq_f16_s16:
7148 case NEON::BI__builtin_neon_vcvtq_f16_u16:
7149 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7150 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
7151 HasLegalHalfType);
7152 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
7153 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
7154 case NEON::BI__builtin_neon_vcvt_n_f16_s16:
7155 case NEON::BI__builtin_neon_vcvt_n_f16_u16:
7156 case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
7157 case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
7158 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
7159 Function *F = CGM.getIntrinsic(Int, Tys);
7160 return EmitNeonCall(F, Ops, "vcvt_n");
7162 case NEON::BI__builtin_neon_vcvt_n_f32_v:
7163 case NEON::BI__builtin_neon_vcvt_n_f64_v:
7164 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
7165 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
7166 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
7167 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7168 Function *F = CGM.getIntrinsic(Int, Tys);
7169 return EmitNeonCall(F, Ops, "vcvt_n");
7171 case NEON::BI__builtin_neon_vcvt_n_s16_f16:
7172 case NEON::BI__builtin_neon_vcvt_n_s32_v:
7173 case NEON::BI__builtin_neon_vcvt_n_u16_f16:
7174 case NEON::BI__builtin_neon_vcvt_n_u32_v:
7175 case NEON::BI__builtin_neon_vcvt_n_s64_v:
7176 case NEON::BI__builtin_neon_vcvt_n_u64_v:
7177 case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
7178 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
7179 case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
7180 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
7181 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
7182 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
7183 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
7184 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7185 return EmitNeonCall(F, Ops, "vcvt_n");
7187 case NEON::BI__builtin_neon_vcvt_s32_v:
7188 case NEON::BI__builtin_neon_vcvt_u32_v:
7189 case NEON::BI__builtin_neon_vcvt_s64_v:
7190 case NEON::BI__builtin_neon_vcvt_u64_v:
7191 case NEON::BI__builtin_neon_vcvt_s16_f16:
7192 case NEON::BI__builtin_neon_vcvt_u16_f16:
7193 case NEON::BI__builtin_neon_vcvtq_s32_v:
7194 case NEON::BI__builtin_neon_vcvtq_u32_v:
7195 case NEON::BI__builtin_neon_vcvtq_s64_v:
7196 case NEON::BI__builtin_neon_vcvtq_u64_v:
7197 case NEON::BI__builtin_neon_vcvtq_s16_f16:
7198 case NEON::BI__builtin_neon_vcvtq_u16_f16: {
7199 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
7200 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
7201 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
7203 case NEON::BI__builtin_neon_vcvta_s16_f16:
7204 case NEON::BI__builtin_neon_vcvta_s32_v:
7205 case NEON::BI__builtin_neon_vcvta_s64_v:
7206 case NEON::BI__builtin_neon_vcvta_u16_f16:
7207 case NEON::BI__builtin_neon_vcvta_u32_v:
7208 case NEON::BI__builtin_neon_vcvta_u64_v:
7209 case NEON::BI__builtin_neon_vcvtaq_s16_f16:
7210 case NEON::BI__builtin_neon_vcvtaq_s32_v:
7211 case NEON::BI__builtin_neon_vcvtaq_s64_v:
7212 case NEON::BI__builtin_neon_vcvtaq_u16_f16:
7213 case NEON::BI__builtin_neon_vcvtaq_u32_v:
7214 case NEON::BI__builtin_neon_vcvtaq_u64_v:
7215 case NEON::BI__builtin_neon_vcvtn_s16_f16:
7216 case NEON::BI__builtin_neon_vcvtn_s32_v:
7217 case NEON::BI__builtin_neon_vcvtn_s64_v:
7218 case NEON::BI__builtin_neon_vcvtn_u16_f16:
7219 case NEON::BI__builtin_neon_vcvtn_u32_v:
7220 case NEON::BI__builtin_neon_vcvtn_u64_v:
7221 case NEON::BI__builtin_neon_vcvtnq_s16_f16:
7222 case NEON::BI__builtin_neon_vcvtnq_s32_v:
7223 case NEON::BI__builtin_neon_vcvtnq_s64_v:
7224 case NEON::BI__builtin_neon_vcvtnq_u16_f16:
7225 case NEON::BI__builtin_neon_vcvtnq_u32_v:
7226 case NEON::BI__builtin_neon_vcvtnq_u64_v:
7227 case NEON::BI__builtin_neon_vcvtp_s16_f16:
7228 case NEON::BI__builtin_neon_vcvtp_s32_v:
7229 case NEON::BI__builtin_neon_vcvtp_s64_v:
7230 case NEON::BI__builtin_neon_vcvtp_u16_f16:
7231 case NEON::BI__builtin_neon_vcvtp_u32_v:
7232 case NEON::BI__builtin_neon_vcvtp_u64_v:
7233 case NEON::BI__builtin_neon_vcvtpq_s16_f16:
7234 case NEON::BI__builtin_neon_vcvtpq_s32_v:
7235 case NEON::BI__builtin_neon_vcvtpq_s64_v:
7236 case NEON::BI__builtin_neon_vcvtpq_u16_f16:
7237 case NEON::BI__builtin_neon_vcvtpq_u32_v:
7238 case NEON::BI__builtin_neon_vcvtpq_u64_v:
7239 case NEON::BI__builtin_neon_vcvtm_s16_f16:
7240 case NEON::BI__builtin_neon_vcvtm_s32_v:
7241 case NEON::BI__builtin_neon_vcvtm_s64_v:
7242 case NEON::BI__builtin_neon_vcvtm_u16_f16:
7243 case NEON::BI__builtin_neon_vcvtm_u32_v:
7244 case NEON::BI__builtin_neon_vcvtm_u64_v:
7245 case NEON::BI__builtin_neon_vcvtmq_s16_f16:
7246 case NEON::BI__builtin_neon_vcvtmq_s32_v:
7247 case NEON::BI__builtin_neon_vcvtmq_s64_v:
7248 case NEON::BI__builtin_neon_vcvtmq_u16_f16:
7249 case NEON::BI__builtin_neon_vcvtmq_u32_v:
7250 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
7251 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
7252 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7254 case NEON::BI__builtin_neon_vcvtx_f32_v: {
7255 llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
7256 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
7259 case NEON::BI__builtin_neon_vext_v:
7260 case NEON::BI__builtin_neon_vextq_v: {
7261 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
7262 SmallVector<int, 16> Indices;
7263 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7264 Indices.push_back(i+CV);
7266 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7267 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7268 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
7270 case NEON::BI__builtin_neon_vfma_v:
7271 case NEON::BI__builtin_neon_vfmaq_v: {
7272 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7273 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7274 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7276 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
7277 return emitCallMaybeConstrainedFPBuiltin(
7278 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
7279 {Ops[1], Ops[2], Ops[0]});
7281 case NEON::BI__builtin_neon_vld1_v:
7282 case NEON::BI__builtin_neon_vld1q_v: {
7283 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7284 Ops.push_back(getAlignmentValue32(PtrOp0));
7285 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
7287 case NEON::BI__builtin_neon_vld1_x2_v:
7288 case NEON::BI__builtin_neon_vld1q_x2_v:
7289 case NEON::BI__builtin_neon_vld1_x3_v:
7290 case NEON::BI__builtin_neon_vld1q_x3_v:
7291 case NEON::BI__builtin_neon_vld1_x4_v:
7292 case NEON::BI__builtin_neon_vld1q_x4_v: {
7293 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
7294 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
7295 llvm::Type *Tys[2] = { VTy, PTy };
7296 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7297 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
7298 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7299 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7300 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7302 case NEON::BI__builtin_neon_vld2_v:
7303 case NEON::BI__builtin_neon_vld2q_v:
7304 case NEON::BI__builtin_neon_vld3_v:
7305 case NEON::BI__builtin_neon_vld3q_v:
7306 case NEON::BI__builtin_neon_vld4_v:
7307 case NEON::BI__builtin_neon_vld4q_v:
7308 case NEON::BI__builtin_neon_vld2_dup_v:
7309 case NEON::BI__builtin_neon_vld2q_dup_v:
7310 case NEON::BI__builtin_neon_vld3_dup_v:
7311 case NEON::BI__builtin_neon_vld3q_dup_v:
7312 case NEON::BI__builtin_neon_vld4_dup_v:
7313 case NEON::BI__builtin_neon_vld4q_dup_v: {
7314 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7315 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7316 Value *Align = getAlignmentValue32(PtrOp1);
7317 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
7318 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7319 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7320 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7322 case NEON::BI__builtin_neon_vld1_dup_v:
7323 case NEON::BI__builtin_neon_vld1q_dup_v: {
7324 Value *V = PoisonValue::get(Ty);
7325 PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
7326 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
7327 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7328 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
7329 return EmitNeonSplat(Ops[0], CI);
7331 case NEON::BI__builtin_neon_vld2_lane_v:
7332 case NEON::BI__builtin_neon_vld2q_lane_v:
7333 case NEON::BI__builtin_neon_vld3_lane_v:
7334 case NEON::BI__builtin_neon_vld3q_lane_v:
7335 case NEON::BI__builtin_neon_vld4_lane_v:
7336 case NEON::BI__builtin_neon_vld4q_lane_v: {
7337 llvm::Type *Tys[] = {Ty, Int8PtrTy};
7338 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
7339 for (unsigned I = 2; I < Ops.size() - 1; ++I)
7340 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
7341 Ops.push_back(getAlignmentValue32(PtrOp1));
7342 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
7343 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
7344 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7345 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7347 case NEON::BI__builtin_neon_vmovl_v: {
7348 llvm::FixedVectorType *DTy =
7349 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
7350 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
7351 if (Usgn)
7352 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
7353 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
7355 case NEON::BI__builtin_neon_vmovn_v: {
7356 llvm::FixedVectorType *QTy =
7357 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7358 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
7359 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
7361 case NEON::BI__builtin_neon_vmull_v:
7362 // FIXME: the integer vmull operations could be emitted in terms of pure
7363 // LLVM IR (two exts followed by a mul). Unfortunately LLVM has a habit of
7364 // hoisting the exts outside loops. Until GlobalISel comes along and can
7365 // see through such movement, this leads to bad CodeGen. So we need an
7366 // intrinsic for now.
7367 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
7368 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
7369 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
7370 case NEON::BI__builtin_neon_vpadal_v:
7371 case NEON::BI__builtin_neon_vpadalq_v: {
7372 // The source operand type has twice as many elements of half the size.
7373 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
7374 llvm::Type *EltTy =
7375 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
7376 auto *NarrowTy =
7377 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
7378 llvm::Type *Tys[2] = { Ty, NarrowTy };
7379 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7381 case NEON::BI__builtin_neon_vpaddl_v:
7382 case NEON::BI__builtin_neon_vpaddlq_v: {
7383 // The source operand type has twice as many elements of half the size.
7384 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
7385 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
7386 auto *NarrowTy =
7387 llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
7388 llvm::Type *Tys[2] = { Ty, NarrowTy };
7389 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
7391 case NEON::BI__builtin_neon_vqdmlal_v:
7392 case NEON::BI__builtin_neon_vqdmlsl_v: {
7393 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
7394 Ops[1] =
7395 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
7396 Ops.resize(2);
7397 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
7399 case NEON::BI__builtin_neon_vqdmulhq_lane_v:
7400 case NEON::BI__builtin_neon_vqdmulh_lane_v:
7401 case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
7402 case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
7403 auto *RTy = cast<llvm::FixedVectorType>(Ty);
7404 if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
7405 BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
7406 RTy = llvm::FixedVectorType::get(RTy->getElementType(),
7407 RTy->getNumElements() * 2);
7408 llvm::Type *Tys[2] = {
7409 RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
7410 /*isQuad*/ false))};
7411 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7413 case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
7414 case NEON::BI__builtin_neon_vqdmulh_laneq_v:
7415 case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
7416 case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
7417 llvm::Type *Tys[2] = {
7418 Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
7419 /*isQuad*/ true))};
7420 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
7422 case NEON::BI__builtin_neon_vqshl_n_v:
7423 case NEON::BI__builtin_neon_vqshlq_n_v:
7424 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
7425 1, false);
7426 case NEON::BI__builtin_neon_vqshlu_n_v:
7427 case NEON::BI__builtin_neon_vqshluq_n_v:
7428 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
7429 1, false);
7430 case NEON::BI__builtin_neon_vrecpe_v:
7431 case NEON::BI__builtin_neon_vrecpeq_v:
7432 case NEON::BI__builtin_neon_vrsqrte_v:
7433 case NEON::BI__builtin_neon_vrsqrteq_v:
7434 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
7435 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
7436 case NEON::BI__builtin_neon_vrndi_v:
7437 case NEON::BI__builtin_neon_vrndiq_v:
7438 Int = Builder.getIsFPConstrained()
7439 ? Intrinsic::experimental_constrained_nearbyint
7440 : Intrinsic::nearbyint;
7441 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
7442 case NEON::BI__builtin_neon_vrshr_n_v:
7443 case NEON::BI__builtin_neon_vrshrq_n_v:
7444 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
7445 1, true);
7446 case NEON::BI__builtin_neon_vsha512hq_u64:
7447 case NEON::BI__builtin_neon_vsha512h2q_u64:
7448 case NEON::BI__builtin_neon_vsha512su0q_u64:
7449 case NEON::BI__builtin_neon_vsha512su1q_u64: {
7450 Function *F = CGM.getIntrinsic(Int);
7451 return EmitNeonCall(F, Ops, "");
7453 case NEON::BI__builtin_neon_vshl_n_v:
7454 case NEON::BI__builtin_neon_vshlq_n_v:
7455 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
7456 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
7457 "vshl_n");
7458 case NEON::BI__builtin_neon_vshll_n_v: {
7459 llvm::FixedVectorType *SrcTy =
7460 llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
7461 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7462 if (Usgn)
7463 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
7464 else
7465 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
7466 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
7467 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
7469 case NEON::BI__builtin_neon_vshrn_n_v: {
7470 llvm::FixedVectorType *SrcTy =
7471 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7472 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7473 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
7474 if (Usgn)
7475 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
7476 else
7477 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
7478 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
7480 case NEON::BI__builtin_neon_vshr_n_v:
7481 case NEON::BI__builtin_neon_vshrq_n_v:
7482 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
7483 case NEON::BI__builtin_neon_vst1_v:
7484 case NEON::BI__builtin_neon_vst1q_v:
7485 case NEON::BI__builtin_neon_vst2_v:
7486 case NEON::BI__builtin_neon_vst2q_v:
7487 case NEON::BI__builtin_neon_vst3_v:
7488 case NEON::BI__builtin_neon_vst3q_v:
7489 case NEON::BI__builtin_neon_vst4_v:
7490 case NEON::BI__builtin_neon_vst4q_v:
7491 case NEON::BI__builtin_neon_vst2_lane_v:
7492 case NEON::BI__builtin_neon_vst2q_lane_v:
7493 case NEON::BI__builtin_neon_vst3_lane_v:
7494 case NEON::BI__builtin_neon_vst3q_lane_v:
7495 case NEON::BI__builtin_neon_vst4_lane_v:
7496 case NEON::BI__builtin_neon_vst4q_lane_v: {
7497 llvm::Type *Tys[] = {Int8PtrTy, Ty};
7498 Ops.push_back(getAlignmentValue32(PtrOp0));
7499 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
7501 case NEON::BI__builtin_neon_vsm3partw1q_u32:
7502 case NEON::BI__builtin_neon_vsm3partw2q_u32:
7503 case NEON::BI__builtin_neon_vsm3ss1q_u32:
7504 case NEON::BI__builtin_neon_vsm4ekeyq_u32:
7505 case NEON::BI__builtin_neon_vsm4eq_u32: {
7506 Function *F = CGM.getIntrinsic(Int);
7507 return EmitNeonCall(F, Ops, "");
7509 case NEON::BI__builtin_neon_vsm3tt1aq_u32:
7510 case NEON::BI__builtin_neon_vsm3tt1bq_u32:
7511 case NEON::BI__builtin_neon_vsm3tt2aq_u32:
7512 case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
7513 Function *F = CGM.getIntrinsic(Int);
7514 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
7515 return EmitNeonCall(F, Ops, "");
7517 case NEON::BI__builtin_neon_vst1_x2_v:
7518 case NEON::BI__builtin_neon_vst1q_x2_v:
7519 case NEON::BI__builtin_neon_vst1_x3_v:
7520 case NEON::BI__builtin_neon_vst1q_x3_v:
7521 case NEON::BI__builtin_neon_vst1_x4_v:
7522 case NEON::BI__builtin_neon_vst1q_x4_v: {
7523 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
7524 // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
7525 // in AArch64 it comes last. We may want to stick to one or the other.
7526 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
7527 Arch == llvm::Triple::aarch64_32) {
7528 llvm::Type *Tys[2] = { VTy, PTy };
7529 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
7530 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7532 llvm::Type *Tys[2] = { PTy, VTy };
7533 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7535 case NEON::BI__builtin_neon_vsubhn_v: {
7536 llvm::FixedVectorType *SrcTy =
7537 llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7539 // %diff = sub <4 x i32> %lhs, %rhs
7540 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7541 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7542 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
7544 // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
7545 Constant *ShiftAmt =
7546 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7547 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
7549 // %res = trunc <4 x i32> %high to <4 x i16>
7550 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
7552 case NEON::BI__builtin_neon_vtrn_v:
7553 case NEON::BI__builtin_neon_vtrnq_v: {
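// vtrn returns its two results indirectly: build the even- and odd-lane
// transpositions with shuffles and store them into the two-vector result
// that Ops[0] points at.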
7554 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7555 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7556 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7557 Value *SV = nullptr;
7559 for (unsigned vi = 0; vi != 2; ++vi) {
7560 SmallVector<int, 16> Indices;
7561 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7562 Indices.push_back(i+vi);
7563 Indices.push_back(i+e+vi);
7565 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7566 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
7567 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7569 return SV;
7571 case NEON::BI__builtin_neon_vtst_v:
7572 case NEON::BI__builtin_neon_vtstq_v: {
7573 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7574 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7575 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
7576 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
7577 ConstantAggregateZero::get(Ty));
7578 return Builder.CreateSExt(Ops[0], Ty, "vtst");
7580 case NEON::BI__builtin_neon_vuzp_v:
7581 case NEON::BI__builtin_neon_vuzpq_v: {
7582 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7583 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7584 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7585 Value *SV = nullptr;
7587 for (unsigned vi = 0; vi != 2; ++vi) {
7588 SmallVector<int, 16> Indices;
7589 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7590 Indices.push_back(2*i+vi);
7592 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7593 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
7594 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7596 return SV;
7598 case NEON::BI__builtin_neon_vxarq_u64: {
7599 Function *F = CGM.getIntrinsic(Int);
7600 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
7601 return EmitNeonCall(F, Ops, "");
7603 case NEON::BI__builtin_neon_vzip_v:
7604 case NEON::BI__builtin_neon_vzipq_v: {
7605 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7606 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7607 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7608 Value *SV = nullptr;
7610 for (unsigned vi = 0; vi != 2; ++vi) {
7611 SmallVector<int, 16> Indices;
7612 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7613 Indices.push_back((i + vi*e) >> 1);
7614 Indices.push_back(((i + vi*e) >> 1)+e);
7616 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7617 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
7618 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7620 return SV;
7622 case NEON::BI__builtin_neon_vdot_s32:
7623 case NEON::BI__builtin_neon_vdot_u32:
7624 case NEON::BI__builtin_neon_vdotq_s32:
7625 case NEON::BI__builtin_neon_vdotq_u32: {
7626 auto *InputTy =
7627 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7628 llvm::Type *Tys[2] = { Ty, InputTy };
7629 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
7631 case NEON::BI__builtin_neon_vfmlal_low_f16:
7632 case NEON::BI__builtin_neon_vfmlalq_low_f16: {
7633 auto *InputTy =
7634 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7635 llvm::Type *Tys[2] = { Ty, InputTy };
7636 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
7638 case NEON::BI__builtin_neon_vfmlsl_low_f16:
7639 case NEON::BI__builtin_neon_vfmlslq_low_f16: {
7640 auto *InputTy =
7641 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7642 llvm::Type *Tys[2] = { Ty, InputTy };
7643 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
7645 case NEON::BI__builtin_neon_vfmlal_high_f16:
7646 case NEON::BI__builtin_neon_vfmlalq_high_f16: {
7647 auto *InputTy =
7648 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7649 llvm::Type *Tys[2] = { Ty, InputTy };
7650 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
7652 case NEON::BI__builtin_neon_vfmlsl_high_f16:
7653 case NEON::BI__builtin_neon_vfmlslq_high_f16: {
7654 auto *InputTy =
7655 llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7656 llvm::Type *Tys[2] = { Ty, InputTy };
7657 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
7659 case NEON::BI__builtin_neon_vmmlaq_s32:
7660 case NEON::BI__builtin_neon_vmmlaq_u32: {
7661 auto *InputTy =
7662 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7663 llvm::Type *Tys[2] = { Ty, InputTy };
7664 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
7666 case NEON::BI__builtin_neon_vusmmlaq_s32: {
7667 auto *InputTy =
7668 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7669 llvm::Type *Tys[2] = { Ty, InputTy };
7670 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
7672 case NEON::BI__builtin_neon_vusdot_s32:
7673 case NEON::BI__builtin_neon_vusdotq_s32: {
7674 auto *InputTy =
7675 llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7676 llvm::Type *Tys[2] = { Ty, InputTy };
7677 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
7679 case NEON::BI__builtin_neon_vbfdot_f32:
7680 case NEON::BI__builtin_neon_vbfdotq_f32: {
7681 llvm::Type *InputTy =
7682 llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
7683 llvm::Type *Tys[2] = { Ty, InputTy };
7684 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
7686 case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
7687 llvm::Type *Tys[1] = { Ty };
7688 Function *F = CGM.getIntrinsic(Int, Tys);
7689 return EmitNeonCall(F, Ops, "vcvtfp2bf");
7694 assert(Int && "Expected valid intrinsic number");
7696 // Determine the type(s) of this overloaded AArch64 intrinsic.
7697 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
7699 Value *Result = EmitNeonCall(F, Ops, NameHint);
7700 llvm::Type *ResultType = ConvertType(E->getType());
7701 // Cast the intrinsic's one-element vector result to the scalar type
7702 // expected by the builtin.
7703 return Builder.CreateBitCast(Result, ResultType, NameHint);
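// Emit a NEON compare-against-zero: choose the floating-point predicate Fp or
// the integer predicate Ip based on the operand's original (pre-bitcast)
// type, compare against zero, and sign-extend the i1 result to Ty.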
7706 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
7707 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
7708 const CmpInst::Predicate Ip, const Twine &Name) {
7709 llvm::Type *OTy = Op->getType();
7711 // FIXME: this is utterly horrific. We should not be looking at previous
7712 // codegen context to find out what needs doing. Unfortunately TableGen
7713 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
7714 // (etc).
7715 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
7716 OTy = BI->getOperand(0)->getType();
7718 Op = Builder.CreateBitCast(Op, OTy);
7719 if (OTy->getScalarType()->isFloatingPointTy()) {
7720 if (Fp == CmpInst::FCMP_OEQ)
7721 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
7722 else
7723 Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy));
7724 } else {
7725 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
7727 return Builder.CreateSExt(Op, Ty, Name);
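// Pack a list of 64-bit NEON table registers pairwise into 128-bit vectors
// (zero-filling the upper half when the count is odd), append the index
// operand, and emit the requested TBL/TBX intrinsic.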
7730 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
7731 Value *ExtOp, Value *IndexOp,
7732 llvm::Type *ResTy, unsigned IntID,
7733 const char *Name) {
7734 SmallVector<Value *, 2> TblOps;
7735 if (ExtOp)
7736 TblOps.push_back(ExtOp);
7738 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7739 SmallVector<int, 16> Indices;
7740 auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
7741 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
7742 Indices.push_back(2*i);
7743 Indices.push_back(2*i+1);
7746 int PairPos = 0, End = Ops.size() - 1;
7747 while (PairPos < End) {
7748 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7749 Ops[PairPos+1], Indices,
7750 Name));
7751 PairPos += 2;
7754 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
7755 // of the last 128-bit lookup table with zero.
7756 if (PairPos == End) {
7757 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
7758 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7759 ZeroTbl, Indices, Name));
7762 Function *TblF;
7763 TblOps.push_back(IndexOp);
7764 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
7766 return CGF.EmitNeonCall(TblF, TblOps, Name);
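// Map the ARM hint builtins (nop, yield, wfe, wfi, sev, sevl) to the
// llvm.arm.hint intrinsic with the corresponding immediate; non-hint builtins
// return null and are handled elsewhere.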
7769 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
7770 unsigned Value;
7771 switch (BuiltinID) {
7772 default:
7773 return nullptr;
7774 case clang::ARM::BI__builtin_arm_nop:
7775 Value = 0;
7776 break;
7777 case clang::ARM::BI__builtin_arm_yield:
7778 case clang::ARM::BI__yield:
7779 Value = 1;
7780 break;
7781 case clang::ARM::BI__builtin_arm_wfe:
7782 case clang::ARM::BI__wfe:
7783 Value = 2;
7784 break;
7785 case clang::ARM::BI__builtin_arm_wfi:
7786 case clang::ARM::BI__wfi:
7787 Value = 3;
7788 break;
7789 case clang::ARM::BI__builtin_arm_sev:
7790 case clang::ARM::BI__sev:
7791 Value = 4;
7792 break;
7793 case clang::ARM::BI__builtin_arm_sevl:
7794 case clang::ARM::BI__sevl:
7795 Value = 5;
7796 break;
7799 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
7800 llvm::ConstantInt::get(Int32Ty, Value));
7803 enum SpecialRegisterAccessKind {
7804 NormalRead,
7805 VolatileRead,
7806 Write,
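// Emit llvm.amdgcn.ballot of an all-true predicate; ballot(true) yields the
// wave's exec mask, overloaded on the result type the builtin expects.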
7809 static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
7810 llvm::Type *RegisterType,
7811 llvm::Type *ValueType) {
7812 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7813 CodeGen::CodeGenModule &CGM = CGF.CGM;
7815 llvm::Type *ResultType = CGF.ConvertType(E->getType());
7816 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {ResultType});
7817 llvm::Value *Call = Builder.CreateCall(F, {Builder.getInt1(true)});
7818 return Call;
7821 // Generates the IR for read/write special register builtins. ValueType is
7822 // the type of the value that is to be written or read; RegisterType is the
7823 // type of the register being written to or read from.
7824 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
7825 const CallExpr *E,
7826 llvm::Type *RegisterType,
7827 llvm::Type *ValueType,
7828 SpecialRegisterAccessKind AccessKind,
7829 StringRef SysReg = "") {
7830 // Read/write register intrinsics only support 32-, 64- and 128-bit operations.
7831 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) ||
7832 RegisterType->isIntegerTy(128)) &&
7833 "Unsupported size for register.");
7835 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7836 CodeGen::CodeGenModule &CGM = CGF.CGM;
7837 LLVMContext &Context = CGM.getLLVMContext();
7839 if (SysReg.empty()) {
7840 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
7841 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
7844 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
7845 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7846 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7848 llvm::Type *Types[] = { RegisterType };
7850 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
7851 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7852 && "Can't fit 64-bit value in 32-bit register");
7854 if (AccessKind != Write) {
7855 assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7856 llvm::Function *F = CGM.getIntrinsic(
7857 AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
7858 : llvm::Intrinsic::read_register,
7859 Types);
7860 llvm::Value *Call = Builder.CreateCall(F, Metadata);
7862 if (MixedTypes)
7863 // Read into a 64-bit register, then truncate the result to 32 bits.
7864 return Builder.CreateTrunc(Call, ValueType);
7866 if (ValueType->isPointerTy())
7867 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
7868 return Builder.CreateIntToPtr(Call, ValueType);
7870 return Call;
7873 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7874 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
7875 if (MixedTypes) {
7876 // Extend 32 bit write value to 64 bit to pass to write.
7877 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
7878 return Builder.CreateCall(F, { Metadata, ArgValue });
7881 if (ValueType->isPointerTy()) {
7882 // Have VoidPtrTy ArgValue but want to return an i32/i64.
7883 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
7884 return Builder.CreateCall(F, { Metadata, ArgValue });
7887 return Builder.CreateCall(F, { Metadata, ArgValue });
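  // Illustrative example (approximate IR; the register name is a placeholder):
  //   unsigned x = __builtin_arm_rsr("sysreg");
  // takes the VolatileRead path above and is emitted roughly as
  //   %x = call i32 @llvm.read_volatile_register.i32(metadata !0)
  // where !0 = !{!"sysreg"}; the __builtin_arm_wsr family is lowered through
  // llvm.write_register in the same way.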
7890 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
7891 /// argument that specifies the vector type.
7892 static bool HasExtraNeonArgument(unsigned BuiltinID) {
7893 switch (BuiltinID) {
7894 default: break;
7895 case NEON::BI__builtin_neon_vget_lane_i8:
7896 case NEON::BI__builtin_neon_vget_lane_i16:
7897 case NEON::BI__builtin_neon_vget_lane_bf16:
7898 case NEON::BI__builtin_neon_vget_lane_i32:
7899 case NEON::BI__builtin_neon_vget_lane_i64:
7900 case NEON::BI__builtin_neon_vget_lane_f32:
7901 case NEON::BI__builtin_neon_vgetq_lane_i8:
7902 case NEON::BI__builtin_neon_vgetq_lane_i16:
7903 case NEON::BI__builtin_neon_vgetq_lane_bf16:
7904 case NEON::BI__builtin_neon_vgetq_lane_i32:
7905 case NEON::BI__builtin_neon_vgetq_lane_i64:
7906 case NEON::BI__builtin_neon_vgetq_lane_f32:
7907 case NEON::BI__builtin_neon_vduph_lane_bf16:
7908 case NEON::BI__builtin_neon_vduph_laneq_bf16:
7909 case NEON::BI__builtin_neon_vset_lane_i8:
7910 case NEON::BI__builtin_neon_vset_lane_i16:
7911 case NEON::BI__builtin_neon_vset_lane_bf16:
7912 case NEON::BI__builtin_neon_vset_lane_i32:
7913 case NEON::BI__builtin_neon_vset_lane_i64:
7914 case NEON::BI__builtin_neon_vset_lane_f32:
7915 case NEON::BI__builtin_neon_vsetq_lane_i8:
7916 case NEON::BI__builtin_neon_vsetq_lane_i16:
7917 case NEON::BI__builtin_neon_vsetq_lane_bf16:
7918 case NEON::BI__builtin_neon_vsetq_lane_i32:
7919 case NEON::BI__builtin_neon_vsetq_lane_i64:
7920 case NEON::BI__builtin_neon_vsetq_lane_f32:
7921 case NEON::BI__builtin_neon_vsha1h_u32:
7922 case NEON::BI__builtin_neon_vsha1cq_u32:
7923 case NEON::BI__builtin_neon_vsha1pq_u32:
7924 case NEON::BI__builtin_neon_vsha1mq_u32:
7925 case NEON::BI__builtin_neon_vcvth_bf16_f32:
7926 case clang::ARM::BI_MoveToCoprocessor:
7927 case clang::ARM::BI_MoveToCoprocessor2:
7928 return false;
7930 return true;
7933 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
7934 const CallExpr *E,
7935 ReturnValueSlot ReturnValue,
7936 llvm::Triple::ArchType Arch) {
7937 if (auto Hint = GetValueForARMHint(BuiltinID))
7938 return Hint;
7940 if (BuiltinID == clang::ARM::BI__emit) {
7941 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
7942 llvm::FunctionType *FTy =
7943 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
7945 Expr::EvalResult Result;
7946 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7947 llvm_unreachable("Sema will ensure that the parameter is constant");
7949 llvm::APSInt Value = Result.Val.getInt();
7950 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
7952 llvm::InlineAsm *Emit =
7953 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
7954 /*hasSideEffects=*/true)
7955 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
7956 /*hasSideEffects=*/true);
7958 return Builder.CreateCall(Emit);
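  // For example (operand value chosen arbitrarily): __emit(0x1234) on a Thumb
  // target produces the inline-asm string ".inst.n 0x1234", and on an ARM
  // target ".inst 0x1234", marked as having side effects so it is not dropped.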
7961 if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
7962 Value *Option = EmitScalarExpr(E->getArg(0));
7963 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
7966 if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
7967 Value *Address = EmitScalarExpr(E->getArg(0));
7968 Value *RW = EmitScalarExpr(E->getArg(1));
7969 Value *IsData = EmitScalarExpr(E->getArg(2));
7971 // Locality is not supported on the ARM target; use the maximum value (3).
7972 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
7974 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7975 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7978 if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
7979 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7980 return Builder.CreateCall(
7981 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7984 if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
7985 BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
7986 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7987 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
7988 Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
7989 if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
7990 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
7991 return Res;
7995 if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
7996 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7997 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7999 if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
8000 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
8001 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
8002 "cls");
8005 if (BuiltinID == clang::ARM::BI__clear_cache) {
8006 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
8007 const FunctionDecl *FD = E->getDirectCallee();
8008 Value *Ops[2];
8009 for (unsigned i = 0; i < 2; i++)
8010 Ops[i] = EmitScalarExpr(E->getArg(i));
8011 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
8012 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
8013 StringRef Name = FD->getName();
8014 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
8017 if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
8018 BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
8019 Function *F;
8021 switch (BuiltinID) {
8022 default: llvm_unreachable("unexpected builtin");
8023 case clang::ARM::BI__builtin_arm_mcrr:
8024 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
8025 break;
8026 case clang::ARM::BI__builtin_arm_mcrr2:
8027 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
8028 break;
8031 // The MCRR{2} instruction has 5 operands, but
8032 // the intrinsic has only 4 because Rt and Rt2
8033 // are represented as a single unsigned 64-bit
8034 // integer in the intrinsic definition, while
8035 // internally they are passed to the instruction
8036 // as two 32-bit integers.
8038 Value *Coproc = EmitScalarExpr(E->getArg(0));
8039 Value *Opc1 = EmitScalarExpr(E->getArg(1));
8040 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
8041 Value *CRm = EmitScalarExpr(E->getArg(3));
8043 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
8044 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
8045 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
8046 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
8048 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
8051 if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
8052 BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
8053 Function *F;
8055 switch (BuiltinID) {
8056 default: llvm_unreachable("unexpected builtin");
8057 case clang::ARM::BI__builtin_arm_mrrc:
8058 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
8059 break;
8060 case clang::ARM::BI__builtin_arm_mrrc2:
8061 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
8062 break;
8065 Value *Coproc = EmitScalarExpr(E->getArg(0));
8066 Value *Opc1 = EmitScalarExpr(E->getArg(1));
8067 Value *CRm = EmitScalarExpr(E->getArg(2));
8068 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
8070 // The intrinsic returns the result as two 32-bit integers; repack them
8071 // into the single unsigned 64-bit value the builtin returns.
8073 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
8074 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
8075 Rt = Builder.CreateZExt(Rt, Int64Ty);
8076 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
8078 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
8079 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
8080 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
8082 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
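  // Packing convention, as implemented above: for MCRR{2} the 64-bit argument
  // is split into Rt = low 32 bits and Rt2 = high 32 bits, and for MRRC{2} the
  // two 32-bit results are recombined as (Rt << 32) | Rt1 before being bitcast
  // to the builtin's return type.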
8085 if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
8086 ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
8087 BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
8088 getContext().getTypeSize(E->getType()) == 64) ||
8089 BuiltinID == clang::ARM::BI__ldrexd) {
8090 Function *F;
8092 switch (BuiltinID) {
8093 default: llvm_unreachable("unexpected builtin");
8094 case clang::ARM::BI__builtin_arm_ldaex:
8095 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
8096 break;
8097 case clang::ARM::BI__builtin_arm_ldrexd:
8098 case clang::ARM::BI__builtin_arm_ldrex:
8099 case clang::ARM::BI__ldrexd:
8100 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
8101 break;
8104 Value *LdPtr = EmitScalarExpr(E->getArg(0));
8105 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
8106 "ldrexd");
8108 Value *Val0 = Builder.CreateExtractValue(Val, 1);
8109 Value *Val1 = Builder.CreateExtractValue(Val, 0);
8110 Val0 = Builder.CreateZExt(Val0, Int64Ty);
8111 Val1 = Builder.CreateZExt(Val1, Int64Ty);
8113 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
8114 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
8115 Val = Builder.CreateOr(Val, Val1);
8116 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
8119 if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
8120 BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
8121 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
8123 QualType Ty = E->getType();
8124 llvm::Type *RealResTy = ConvertType(Ty);
8125 llvm::Type *IntTy =
8126 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
8127 llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
8129 Function *F = CGM.getIntrinsic(
8130 BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
8131 : Intrinsic::arm_ldrex,
8132 PtrTy);
8133 CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
8134 Val->addParamAttr(
8135 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
8137 if (RealResTy->isPointerTy())
8138 return Builder.CreateIntToPtr(Val, RealResTy);
8139 else {
8140 llvm::Type *IntResTy = llvm::IntegerType::get(
8141 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
8142 return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
8143 RealResTy);
8147 if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
8148 ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
8149 BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
8150 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
8151 Function *F = CGM.getIntrinsic(
8152 BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
8153 : Intrinsic::arm_strexd);
8154 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
8156 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
8157 Value *Val = EmitScalarExpr(E->getArg(0));
8158 Builder.CreateStore(Val, Tmp);
8160 Address LdPtr = Tmp.withElementType(STy);
8161 Val = Builder.CreateLoad(LdPtr);
8163 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
8164 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
8165 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
8166 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
8169 if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
8170 BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
8171 Value *StoreVal = EmitScalarExpr(E->getArg(0));
8172 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
8174 QualType Ty = E->getArg(0)->getType();
8175 llvm::Type *StoreTy =
8176 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
8178 if (StoreVal->getType()->isPointerTy())
8179 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
8180 else {
8181 llvm::Type *IntTy = llvm::IntegerType::get(
8182 getLLVMContext(),
8183 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
8184 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
8185 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
8188 Function *F = CGM.getIntrinsic(
8189 BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
8190 : Intrinsic::arm_strex,
8191 StoreAddr->getType());
8193 CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
8194 CI->addParamAttr(
8195 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
8196 return CI;
8199 if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
8200 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
8201 return Builder.CreateCall(F);
8204 // CRC32
8205 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
8206 switch (BuiltinID) {
8207 case clang::ARM::BI__builtin_arm_crc32b:
8208 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
8209 case clang::ARM::BI__builtin_arm_crc32cb:
8210 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
8211 case clang::ARM::BI__builtin_arm_crc32h:
8212 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
8213 case clang::ARM::BI__builtin_arm_crc32ch:
8214 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
8215 case clang::ARM::BI__builtin_arm_crc32w:
8216 case clang::ARM::BI__builtin_arm_crc32d:
8217 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
8218 case clang::ARM::BI__builtin_arm_crc32cw:
8219 case clang::ARM::BI__builtin_arm_crc32cd:
8220 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
8223 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
8224 Value *Arg0 = EmitScalarExpr(E->getArg(0));
8225 Value *Arg1 = EmitScalarExpr(E->getArg(1));
8227 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
8228 // intrinsics, hence we need different codegen for these cases.
8229 if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
8230 BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
8231 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
8232 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
8233 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
8234 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
8236 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8237 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
8238 return Builder.CreateCall(F, {Res, Arg1b});
8239 } else {
8240 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
8242 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
8243 return Builder.CreateCall(F, {Arg0, Arg1});
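  // Worked example of the 64-bit case above (approximate IR):
  //   __builtin_arm_crc32d(crc, data)
  // becomes two chained 32-bit CRC steps over the low and high halves:
  //   %lo  = trunc i64 %data to i32
  //   %s   = lshr i64 %data, 32
  //   %hi  = trunc i64 %s to i32
  //   %t   = call i32 @llvm.arm.crc32w(i32 %crc, i32 %lo)
  //   %res = call i32 @llvm.arm.crc32w(i32 %t, i32 %hi)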
8247 if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
8248 BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8249 BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
8250 BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
8251 BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
8252 BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {
8254 SpecialRegisterAccessKind AccessKind = Write;
8255 if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
8256 BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8257 BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
8258 AccessKind = VolatileRead;
8260 bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
8261 BuiltinID == clang::ARM::BI__builtin_arm_wsrp;
8263 bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
8264 BuiltinID == clang::ARM::BI__builtin_arm_wsr64;
8266 llvm::Type *ValueType;
8267 llvm::Type *RegisterType;
8268 if (IsPointerBuiltin) {
8269 ValueType = VoidPtrTy;
8270 RegisterType = Int32Ty;
8271 } else if (Is64Bit) {
8272 ValueType = RegisterType = Int64Ty;
8273 } else {
8274 ValueType = RegisterType = Int32Ty;
8277 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
8278 AccessKind);
8281 if (BuiltinID == ARM::BI__builtin_sponentry) {
8282 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
8283 return Builder.CreateCall(F);
8286 // Handle MSVC intrinsics before argument evaluation to prevent double
8287 // evaluation.
8288 if (std::optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
8289 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
8291 // Deal with MVE builtins
8292 if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
8293 return Result;
8294 // Handle CDE builtins
8295 if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
8296 return Result;
8298 // Some intrinsics are equivalent - if they are use the base intrinsic ID.
8299 auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
8300 return P.first == BuiltinID;
8302 if (It != end(NEONEquivalentIntrinsicMap))
8303 BuiltinID = It->second;
8305 // Find out if any arguments are required to be integer constant
8306 // expressions.
8307 unsigned ICEArguments = 0;
8308 ASTContext::GetBuiltinTypeError Error;
8309 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
8310 assert(Error == ASTContext::GE_None && "Should not codegen an error");
8312 auto getAlignmentValue32 = [&](Address addr) -> Value* {
8313 return Builder.getInt32(addr.getAlignment().getQuantity());
8316 Address PtrOp0 = Address::invalid();
8317 Address PtrOp1 = Address::invalid();
8318 SmallVector<Value*, 4> Ops;
8319 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
8320 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
8321 for (unsigned i = 0, e = NumArgs; i != e; i++) {
8322 if (i == 0) {
8323 switch (BuiltinID) {
8324 case NEON::BI__builtin_neon_vld1_v:
8325 case NEON::BI__builtin_neon_vld1q_v:
8326 case NEON::BI__builtin_neon_vld1q_lane_v:
8327 case NEON::BI__builtin_neon_vld1_lane_v:
8328 case NEON::BI__builtin_neon_vld1_dup_v:
8329 case NEON::BI__builtin_neon_vld1q_dup_v:
8330 case NEON::BI__builtin_neon_vst1_v:
8331 case NEON::BI__builtin_neon_vst1q_v:
8332 case NEON::BI__builtin_neon_vst1q_lane_v:
8333 case NEON::BI__builtin_neon_vst1_lane_v:
8334 case NEON::BI__builtin_neon_vst2_v:
8335 case NEON::BI__builtin_neon_vst2q_v:
8336 case NEON::BI__builtin_neon_vst2_lane_v:
8337 case NEON::BI__builtin_neon_vst2q_lane_v:
8338 case NEON::BI__builtin_neon_vst3_v:
8339 case NEON::BI__builtin_neon_vst3q_v:
8340 case NEON::BI__builtin_neon_vst3_lane_v:
8341 case NEON::BI__builtin_neon_vst3q_lane_v:
8342 case NEON::BI__builtin_neon_vst4_v:
8343 case NEON::BI__builtin_neon_vst4q_v:
8344 case NEON::BI__builtin_neon_vst4_lane_v:
8345 case NEON::BI__builtin_neon_vst4q_lane_v:
8346 // Get the alignment for the argument in addition to the value;
8347 // we'll use it later.
8348 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
8349 Ops.push_back(PtrOp0.getPointer());
8350 continue;
8353 if (i == 1) {
8354 switch (BuiltinID) {
8355 case NEON::BI__builtin_neon_vld2_v:
8356 case NEON::BI__builtin_neon_vld2q_v:
8357 case NEON::BI__builtin_neon_vld3_v:
8358 case NEON::BI__builtin_neon_vld3q_v:
8359 case NEON::BI__builtin_neon_vld4_v:
8360 case NEON::BI__builtin_neon_vld4q_v:
8361 case NEON::BI__builtin_neon_vld2_lane_v:
8362 case NEON::BI__builtin_neon_vld2q_lane_v:
8363 case NEON::BI__builtin_neon_vld3_lane_v:
8364 case NEON::BI__builtin_neon_vld3q_lane_v:
8365 case NEON::BI__builtin_neon_vld4_lane_v:
8366 case NEON::BI__builtin_neon_vld4q_lane_v:
8367 case NEON::BI__builtin_neon_vld2_dup_v:
8368 case NEON::BI__builtin_neon_vld2q_dup_v:
8369 case NEON::BI__builtin_neon_vld3_dup_v:
8370 case NEON::BI__builtin_neon_vld3q_dup_v:
8371 case NEON::BI__builtin_neon_vld4_dup_v:
8372 case NEON::BI__builtin_neon_vld4q_dup_v:
8373 // Get the alignment for the argument in addition to the value;
8374 // we'll use it later.
8375 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
8376 Ops.push_back(PtrOp1.getPointer());
8377 continue;
8381 if ((ICEArguments & (1 << i)) == 0) {
8382 Ops.push_back(EmitScalarExpr(E->getArg(i)));
8383 } else {
8384 // If this is required to be a constant, constant fold it so that we know
8385 // that the generated intrinsic gets a ConstantInt.
8386 Ops.push_back(llvm::ConstantInt::get(
8387 getLLVMContext(),
8388 *E->getArg(i)->getIntegerConstantExpr(getContext())));
8392 switch (BuiltinID) {
8393 default: break;
8395 case NEON::BI__builtin_neon_vget_lane_i8:
8396 case NEON::BI__builtin_neon_vget_lane_i16:
8397 case NEON::BI__builtin_neon_vget_lane_i32:
8398 case NEON::BI__builtin_neon_vget_lane_i64:
8399 case NEON::BI__builtin_neon_vget_lane_bf16:
8400 case NEON::BI__builtin_neon_vget_lane_f32:
8401 case NEON::BI__builtin_neon_vgetq_lane_i8:
8402 case NEON::BI__builtin_neon_vgetq_lane_i16:
8403 case NEON::BI__builtin_neon_vgetq_lane_i32:
8404 case NEON::BI__builtin_neon_vgetq_lane_i64:
8405 case NEON::BI__builtin_neon_vgetq_lane_bf16:
8406 case NEON::BI__builtin_neon_vgetq_lane_f32:
8407 case NEON::BI__builtin_neon_vduph_lane_bf16:
8408 case NEON::BI__builtin_neon_vduph_laneq_bf16:
8409 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
8411 case NEON::BI__builtin_neon_vrndns_f32: {
8412 Value *Arg = EmitScalarExpr(E->getArg(0));
8413 llvm::Type *Tys[] = {Arg->getType()};
8414 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
8415 return Builder.CreateCall(F, {Arg}, "vrndn"); }
8417 case NEON::BI__builtin_neon_vset_lane_i8:
8418 case NEON::BI__builtin_neon_vset_lane_i16:
8419 case NEON::BI__builtin_neon_vset_lane_i32:
8420 case NEON::BI__builtin_neon_vset_lane_i64:
8421 case NEON::BI__builtin_neon_vset_lane_bf16:
8422 case NEON::BI__builtin_neon_vset_lane_f32:
8423 case NEON::BI__builtin_neon_vsetq_lane_i8:
8424 case NEON::BI__builtin_neon_vsetq_lane_i16:
8425 case NEON::BI__builtin_neon_vsetq_lane_i32:
8426 case NEON::BI__builtin_neon_vsetq_lane_i64:
8427 case NEON::BI__builtin_neon_vsetq_lane_bf16:
8428 case NEON::BI__builtin_neon_vsetq_lane_f32:
8429 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8431 case NEON::BI__builtin_neon_vsha1h_u32:
8432 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
8433 "vsha1h");
8434 case NEON::BI__builtin_neon_vsha1cq_u32:
8435 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
8436 "vsha1h");
8437 case NEON::BI__builtin_neon_vsha1pq_u32:
8438 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
8439 "vsha1h");
8440 case NEON::BI__builtin_neon_vsha1mq_u32:
8441 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
8442 "vsha1h");
8444 case NEON::BI__builtin_neon_vcvth_bf16_f32: {
8445 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
8446 "vcvtbfp2bf");
8449 // The ARM _MoveToCoprocessor builtins put the input register value as
8450 // the first argument, but the LLVM intrinsic expects it as the third one.
8451 case clang::ARM::BI_MoveToCoprocessor:
8452 case clang::ARM::BI_MoveToCoprocessor2: {
8453 Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
8454 ? Intrinsic::arm_mcr
8455 : Intrinsic::arm_mcr2);
8456 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
8457 Ops[3], Ops[4], Ops[5]});
8461 // Get the last argument, which specifies the vector type.
8462 assert(HasExtraArg);
8463 const Expr *Arg = E->getArg(E->getNumArgs()-1);
8464 std::optional<llvm::APSInt> Result =
8465 Arg->getIntegerConstantExpr(getContext());
8466 if (!Result)
8467 return nullptr;
8469 if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
8470 BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
8471 // Determine the overloaded type of this builtin.
8472 llvm::Type *Ty;
8473 if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
8474 Ty = FloatTy;
8475 else
8476 Ty = DoubleTy;
8478 // Determine whether this is an unsigned conversion or not.
8479 bool usgn = Result->getZExtValue() == 1;
8480 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
8482 // Call the appropriate intrinsic.
8483 Function *F = CGM.getIntrinsic(Int, Ty);
8484 return Builder.CreateCall(F, Ops, "vcvtr");
8487 // Determine the type of this overloaded NEON intrinsic.
8488 NeonTypeFlags Type = Result->getZExtValue();
8489 bool usgn = Type.isUnsigned();
8490 bool rightShift = false;
8492 llvm::FixedVectorType *VTy =
8493 GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
8494 getTarget().hasBFloat16Type());
8495 llvm::Type *Ty = VTy;
8496 if (!Ty)
8497 return nullptr;
8499 // Many NEON builtins have identical semantics and uses in ARM and
8500 // AArch64. Emit these in a single function.
8501 auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
8502 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
8503 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
8504 if (Builtin)
8505 return EmitCommonNeonBuiltinExpr(
8506 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
8507 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
8509 unsigned Int;
8510 switch (BuiltinID) {
8511 default: return nullptr;
8512 case NEON::BI__builtin_neon_vld1q_lane_v:
8513 // Handle 64-bit integer elements as a special case. Use shuffles of
8514 // one-element vectors to avoid poor code for i64 in the backend.
8515 if (VTy->getElementType()->isIntegerTy(64)) {
8516 // Extract the other lane.
8517 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8518 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
8519 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
8520 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8521 // Load the value as a one-element vector.
8522 Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
8523 llvm::Type *Tys[] = {Ty, Int8PtrTy};
8524 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
8525 Value *Align = getAlignmentValue32(PtrOp0);
8526 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
8527 // Combine them.
8528 int Indices[] = {1 - Lane, Lane};
8529 return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
8531 [[fallthrough]];
8532 case NEON::BI__builtin_neon_vld1_lane_v: {
8533 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8534 PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
8535 Value *Ld = Builder.CreateLoad(PtrOp0);
8536 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
8538 case NEON::BI__builtin_neon_vqrshrn_n_v:
8539 Int =
8540 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
8541 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
8542 1, true);
8543 case NEON::BI__builtin_neon_vqrshrun_n_v:
8544 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
8545 Ops, "vqrshrun_n", 1, true);
8546 case NEON::BI__builtin_neon_vqshrn_n_v:
8547 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
8548 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
8549 1, true);
8550 case NEON::BI__builtin_neon_vqshrun_n_v:
8551 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
8552 Ops, "vqshrun_n", 1, true);
8553 case NEON::BI__builtin_neon_vrecpe_v:
8554 case NEON::BI__builtin_neon_vrecpeq_v:
8555 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
8556 Ops, "vrecpe");
8557 case NEON::BI__builtin_neon_vrshrn_n_v:
8558 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
8559 Ops, "vrshrn_n", 1, true);
8560 case NEON::BI__builtin_neon_vrsra_n_v:
8561 case NEON::BI__builtin_neon_vrsraq_n_v:
8562 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8563 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8564 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
8565 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
8566 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
8567 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
8568 case NEON::BI__builtin_neon_vsri_n_v:
8569 case NEON::BI__builtin_neon_vsriq_n_v:
8570 rightShift = true;
8571 [[fallthrough]];
8572 case NEON::BI__builtin_neon_vsli_n_v:
8573 case NEON::BI__builtin_neon_vsliq_n_v:
8574 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
8575 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
8576 Ops, "vsli_n");
8577 case NEON::BI__builtin_neon_vsra_n_v:
8578 case NEON::BI__builtin_neon_vsraq_n_v:
8579 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8580 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
8581 return Builder.CreateAdd(Ops[0], Ops[1]);
8582 case NEON::BI__builtin_neon_vst1q_lane_v:
8583 // Handle 64-bit integer elements as a special case. Use a shuffle to get
8584 // a one-element vector and avoid poor code for i64 in the backend.
8585 if (VTy->getElementType()->isIntegerTy(64)) {
8586 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8587 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
8588 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8589 Ops[2] = getAlignmentValue32(PtrOp0);
8590 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
8591 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
8592 Tys), Ops);
8594 [[fallthrough]];
8595 case NEON::BI__builtin_neon_vst1_lane_v: {
8596 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8597 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
8598 return Builder.CreateStore(Ops[1],
8599 PtrOp0.withElementType(Ops[1]->getType()));
8601 case NEON::BI__builtin_neon_vtbl1_v:
8602 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
8603 Ops, "vtbl1");
8604 case NEON::BI__builtin_neon_vtbl2_v:
8605 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
8606 Ops, "vtbl2");
8607 case NEON::BI__builtin_neon_vtbl3_v:
8608 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
8609 Ops, "vtbl3");
8610 case NEON::BI__builtin_neon_vtbl4_v:
8611 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
8612 Ops, "vtbl4");
8613 case NEON::BI__builtin_neon_vtbx1_v:
8614 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
8615 Ops, "vtbx1");
8616 case NEON::BI__builtin_neon_vtbx2_v:
8617 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
8618 Ops, "vtbx2");
8619 case NEON::BI__builtin_neon_vtbx3_v:
8620 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
8621 Ops, "vtbx3");
8622 case NEON::BI__builtin_neon_vtbx4_v:
8623 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
8624 Ops, "vtbx4");
8628 template<typename Integer>
8629 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
8630 return E->getIntegerConstantExpr(Context)->getExtValue();
8633 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
8634 llvm::Type *T, bool Unsigned) {
8635 // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
8636 // which finds it convenient to specify signed/unsigned as a boolean flag.
8637 return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
8640 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
8641 uint32_t Shift, bool Unsigned) {
8642 // MVE helper function for integer shift right. This must handle signed vs
8643 // unsigned, and also deal specially with the case where the shift count is
8644 // equal to the lane size. In LLVM IR, an LShr with that parameter would be
8645 // undefined behavior, but in MVE it's legal, so we must convert it to code
8646 // that is not undefined in IR.
8647 unsigned LaneBits = cast<llvm::VectorType>(V->getType())
8648 ->getElementType()
8649 ->getPrimitiveSizeInBits();
8650 if (Shift == LaneBits) {
8651 // An unsigned shift of the full lane size always generates zero, so we can
8652 // simply emit a zero vector. A signed shift of the full lane size does the
8653 // same thing as shifting by one bit fewer.
8654 if (Unsigned)
8655 return llvm::Constant::getNullValue(V->getType());
8656 else
8657 --Shift;
8659 return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
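// Example of the boundary case handled above: for 32-bit lanes, a requested
// shift of 32 becomes a zero vector when Unsigned is true, and an arithmetic
// shift by 31 (which replicates the sign bit) when it is false, since a
// lshr/ashr by the full lane width is not well defined in IR.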
8662 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
8663 // MVE-specific helper function for a vector splat, which infers the element
8664 // count of the output vector by knowing that MVE vectors are all 128 bits
8665 // wide.
8666 unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
8667 return Builder.CreateVectorSplat(Elements, V);
8670 static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
8671 CodeGenFunction *CGF,
8672 llvm::Value *V,
8673 llvm::Type *DestType) {
8674 // Convert one MVE vector type into another by reinterpreting its in-register
8675 // format.
8677 // In little-endian mode this is identical to a bitcast (which reinterprets
8678 // the memory format). In big-endian mode the two are not necessarily the
8679 // same, because the register and memory formats map to each other
8680 // differently depending on the lane size.
8682 // We generate a bitcast whenever we can (if we're little-endian, or if the
8683 // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
8684 // that performs the different kind of reinterpretation.
8685 if (CGF->getTarget().isBigEndian() &&
8686 V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
8687 return Builder.CreateCall(
8688 CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
8689 {DestType, V->getType()}),
8691 } else {
8692 return Builder.CreateBitCast(V, DestType);
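// Usage note, summarizing the logic above: an MVE vreinterpret between, say,
// v8i16 and v4i32 is a plain bitcast on little-endian targets, but on
// big-endian targets it is lowered through llvm.arm.mve.vreinterpretq because
// the in-register lane layout no longer matches a simple bitcast there.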
8696 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
8697 // Make a shufflevector that extracts every other element of a vector (evens
8698 // or odds, as desired).
8699 SmallVector<int, 16> Indices;
8700 unsigned InputElements =
8701 cast<llvm::FixedVectorType>(V->getType())->getNumElements();
8702 for (unsigned i = 0; i < InputElements; i += 2)
8703 Indices.push_back(i + Odd);
8704 return Builder.CreateShuffleVector(V, Indices);
8707 static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
8708 llvm::Value *V1) {
8709 // Make a shufflevector that interleaves two vectors element by element.
8710 assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8711 SmallVector<int, 16> Indices;
8712 unsigned InputElements =
8713 cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
8714 for (unsigned i = 0; i < InputElements; i++) {
8715 Indices.push_back(i);
8716 Indices.push_back(i + InputElements);
8718 return Builder.CreateShuffleVector(V0, V1, Indices);
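// Concrete index patterns produced by the two helpers above: for an
// 8-element input, VectorUnzip selects {0, 2, 4, 6} (Odd == false) or
// {1, 3, 5, 7} (Odd == true); for two 4-element inputs, VectorZip interleaves
// them with the shuffle mask {0, 4, 1, 5, 2, 6, 3, 7}.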
8721 template<unsigned HighBit, unsigned OtherBits>
8722 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
8723 // MVE-specific helper function to make a vector splat of a constant such as
8724 // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
8725 llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
8726 unsigned LaneBits = T->getPrimitiveSizeInBits();
8727 uint32_t Value = HighBit << (LaneBits - 1);
8728 if (OtherBits)
8729 Value |= (1UL << (LaneBits - 1)) - 1;
8730 llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
8731 return ARMMVEVectorSplat(Builder, Lane);
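// For instance, with 32-bit lanes ARMMVEConstantSplat<1, 0> splats the lane
// value 0x80000000 (the INT_MIN bit pattern) and ARMMVEConstantSplat<0, 1>
// splats 0x7fffffff (INT_MAX); the constant is then broadcast with
// ARMMVEVectorSplat above.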
8734 static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
8735 llvm::Value *V,
8736 unsigned ReverseWidth) {
8737 // MVE-specific helper function which reverses the elements of a
8738 // vector within every (ReverseWidth)-bit collection of lanes.
8739 SmallVector<int, 16> Indices;
8740 unsigned LaneSize = V->getType()->getScalarSizeInBits();
8741 unsigned Elements = 128 / LaneSize;
8742 unsigned Mask = ReverseWidth / LaneSize - 1;
8743 for (unsigned i = 0; i < Elements; i++)
8744 Indices.push_back(i ^ Mask);
8745 return Builder.CreateShuffleVector(V, Indices);
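// Example of the index math above: reversing 8-bit lanes within 32-bit groups
// gives Mask == 3, so the shuffle indices are i ^ 3, i.e.
// {3, 2, 1, 0, 7, 6, 5, 4, ...} across the 16 lanes of a 128-bit vector.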
8748 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
8749 const CallExpr *E,
8750 ReturnValueSlot ReturnValue,
8751 llvm::Triple::ArchType Arch) {
8752 enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
8753 Intrinsic::ID IRIntr;
8754 unsigned NumVectors;
8756 // Code autogenerated by Tablegen will handle all the simple builtins.
8757 switch (BuiltinID) {
8758 #include "clang/Basic/arm_mve_builtin_cg.inc"
8760 // If we didn't match an MVE builtin id at all, go back to the
8761 // main EmitARMBuiltinExpr.
8762 default:
8763 return nullptr;
8766 // Anything that breaks from that switch is an MVE builtin that
8767 // needs handwritten code to generate.
8769 switch (CustomCodeGenType) {
8771 case CustomCodeGen::VLD24: {
8772 llvm::SmallVector<Value *, 4> Ops;
8773 llvm::SmallVector<llvm::Type *, 4> Tys;
8775 auto MvecCType = E->getType();
8776 auto MvecLType = ConvertType(MvecCType);
8777 assert(MvecLType->isStructTy() &&
8778 "Return type for vld[24]q should be a struct");
8779 assert(MvecLType->getStructNumElements() == 1 &&
8780 "Return-type struct for vld[24]q should have one element");
8781 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8782 assert(MvecLTypeInner->isArrayTy() &&
8783 "Return-type struct for vld[24]q should contain an array");
8784 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8785 "Array member of return-type struct vld[24]q has wrong length");
8786 auto VecLType = MvecLTypeInner->getArrayElementType();
8788 Tys.push_back(VecLType);
8790 auto Addr = E->getArg(0);
8791 Ops.push_back(EmitScalarExpr(Addr));
8792 Tys.push_back(ConvertType(Addr->getType()));
8794 Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
8795 Value *LoadResult = Builder.CreateCall(F, Ops);
8796 Value *MvecOut = PoisonValue::get(MvecLType);
8797 for (unsigned i = 0; i < NumVectors; ++i) {
8798 Value *Vec = Builder.CreateExtractValue(LoadResult, i);
8799 MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
8802 if (ReturnValue.isNull())
8803 return MvecOut;
8804 else
8805 return Builder.CreateStore(MvecOut, ReturnValue.getValue());
8808 case CustomCodeGen::VST24: {
8809 llvm::SmallVector<Value *, 4> Ops;
8810 llvm::SmallVector<llvm::Type *, 4> Tys;
8812 auto Addr = E->getArg(0);
8813 Ops.push_back(EmitScalarExpr(Addr));
8814 Tys.push_back(ConvertType(Addr->getType()));
8816 auto MvecCType = E->getArg(1)->getType();
8817 auto MvecLType = ConvertType(MvecCType);
8818 assert(MvecLType->isStructTy() && "Data type for vst2q should be a struct");
8819 assert(MvecLType->getStructNumElements() == 1 &&
8820 "Data-type struct for vst2q should have one element");
8821 auto MvecLTypeInner = MvecLType->getStructElementType(0);
8822 assert(MvecLTypeInner->isArrayTy() &&
8823 "Data-type struct for vst2q should contain an array");
8824 assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8825 "Array member of data-type struct for vst[24]q has wrong length");
8826 auto VecLType = MvecLTypeInner->getArrayElementType();
8828 Tys.push_back(VecLType);
8830 AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
8831 EmitAggExpr(E->getArg(1), MvecSlot);
8832 auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
8833 for (unsigned i = 0; i < NumVectors; i++)
8834 Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
8836 Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
8837 Value *ToReturn = nullptr;
8838 for (unsigned i = 0; i < NumVectors; i++) {
8839 Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
8840 ToReturn = Builder.CreateCall(F, Ops);
8841 Ops.pop_back();
8843 return ToReturn;
8846 llvm_unreachable("unknown custom codegen type.");
8849 Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
8850 const CallExpr *E,
8851 ReturnValueSlot ReturnValue,
8852 llvm::Triple::ArchType Arch) {
8853 switch (BuiltinID) {
8854 default:
8855 return nullptr;
8856 #include "clang/Basic/arm_cde_builtin_cg.inc"
8860 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
8861 const CallExpr *E,
8862 SmallVectorImpl<Value *> &Ops,
8863 llvm::Triple::ArchType Arch) {
8864 unsigned int Int = 0;
8865 const char *s = nullptr;
8867 switch (BuiltinID) {
8868 default:
8869 return nullptr;
8870 case NEON::BI__builtin_neon_vtbl1_v:
8871 case NEON::BI__builtin_neon_vqtbl1_v:
8872 case NEON::BI__builtin_neon_vqtbl1q_v:
8873 case NEON::BI__builtin_neon_vtbl2_v:
8874 case NEON::BI__builtin_neon_vqtbl2_v:
8875 case NEON::BI__builtin_neon_vqtbl2q_v:
8876 case NEON::BI__builtin_neon_vtbl3_v:
8877 case NEON::BI__builtin_neon_vqtbl3_v:
8878 case NEON::BI__builtin_neon_vqtbl3q_v:
8879 case NEON::BI__builtin_neon_vtbl4_v:
8880 case NEON::BI__builtin_neon_vqtbl4_v:
8881 case NEON::BI__builtin_neon_vqtbl4q_v:
8882 break;
8883 case NEON::BI__builtin_neon_vtbx1_v:
8884 case NEON::BI__builtin_neon_vqtbx1_v:
8885 case NEON::BI__builtin_neon_vqtbx1q_v:
8886 case NEON::BI__builtin_neon_vtbx2_v:
8887 case NEON::BI__builtin_neon_vqtbx2_v:
8888 case NEON::BI__builtin_neon_vqtbx2q_v:
8889 case NEON::BI__builtin_neon_vtbx3_v:
8890 case NEON::BI__builtin_neon_vqtbx3_v:
8891 case NEON::BI__builtin_neon_vqtbx3q_v:
8892 case NEON::BI__builtin_neon_vtbx4_v:
8893 case NEON::BI__builtin_neon_vqtbx4_v:
8894 case NEON::BI__builtin_neon_vqtbx4q_v:
8895 break;
8898 assert(E->getNumArgs() >= 3);
8900 // Get the last argument, which specifies the vector type.
8901 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
8902 std::optional<llvm::APSInt> Result =
8903 Arg->getIntegerConstantExpr(CGF.getContext());
8904 if (!Result)
8905 return nullptr;
8907 // Determine the type of this overloaded NEON intrinsic.
8908 NeonTypeFlags Type = Result->getZExtValue();
8909 llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
8910 if (!Ty)
8911 return nullptr;
8913 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8915 // AArch64 scalar builtins are not overloaded; they do not have an extra
8916 // argument specifying the vector type, so each case is handled separately.
8917 switch (BuiltinID) {
8918 case NEON::BI__builtin_neon_vtbl1_v: {
8919 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 1), nullptr, Ops[1],
8920 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
8922 case NEON::BI__builtin_neon_vtbl2_v: {
8923 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 2), nullptr, Ops[2],
8924 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
8926 case NEON::BI__builtin_neon_vtbl3_v: {
8927 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 3), nullptr, Ops[3],
8928 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
8930 case NEON::BI__builtin_neon_vtbl4_v: {
8931 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 4), nullptr, Ops[4],
8932 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
8934 case NEON::BI__builtin_neon_vtbx1_v: {
8935 Value *TblRes =
8936 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 1), nullptr, Ops[2], Ty,
8937 Intrinsic::aarch64_neon_tbl1, "vtbl1");
8939 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
8940 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
8941 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8943 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8944 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8945 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8947 case NEON::BI__builtin_neon_vtbx2_v: {
8948 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 2), Ops[0], Ops[3],
8949 Ty, Intrinsic::aarch64_neon_tbx1, "vtbx1");
8951 case NEON::BI__builtin_neon_vtbx3_v: {
8952 Value *TblRes =
8953 packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 3), nullptr, Ops[4], Ty,
8954 Intrinsic::aarch64_neon_tbl2, "vtbl2");
8956 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
8957 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
8958 TwentyFourV);
8959 CmpRes = Builder.CreateSExt(CmpRes, Ty);
8961 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8962 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8963 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8965 case NEON::BI__builtin_neon_vtbx4_v: {
8966 return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 4), Ops[0], Ops[5],
8967 Ty, Intrinsic::aarch64_neon_tbx2, "vtbx2");
8969 case NEON::BI__builtin_neon_vqtbl1_v:
8970 case NEON::BI__builtin_neon_vqtbl1q_v:
8971 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
8972 case NEON::BI__builtin_neon_vqtbl2_v:
8973 case NEON::BI__builtin_neon_vqtbl2q_v: {
8974 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
8975 case NEON::BI__builtin_neon_vqtbl3_v:
8976 case NEON::BI__builtin_neon_vqtbl3q_v:
8977 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
8978 case NEON::BI__builtin_neon_vqtbl4_v:
8979 case NEON::BI__builtin_neon_vqtbl4q_v:
8980 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
8981 case NEON::BI__builtin_neon_vqtbx1_v:
8982 case NEON::BI__builtin_neon_vqtbx1q_v:
8983 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
8984 case NEON::BI__builtin_neon_vqtbx2_v:
8985 case NEON::BI__builtin_neon_vqtbx2q_v:
8986 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
8987 case NEON::BI__builtin_neon_vqtbx3_v:
8988 case NEON::BI__builtin_neon_vqtbx3q_v:
8989 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
8990 case NEON::BI__builtin_neon_vqtbx4_v:
8991 case NEON::BI__builtin_neon_vqtbx4q_v:
8992 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
8996 if (!Int)
8997 return nullptr;
8999 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
9000 return CGF.EmitNeonCall(F, Ops, s);
9003 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
9004 auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
9005 Op = Builder.CreateBitCast(Op, Int16Ty);
9006 Value *V = PoisonValue::get(VTy);
9007 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
9008 Op = Builder.CreateInsertElement(V, Op, CI);
9009 return Op;
9012 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
9013 /// access builtin. Only required if it can't be inferred from the base pointer
9014 /// operand.
9015 llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
9016 switch (TypeFlags.getMemEltType()) {
9017 case SVETypeFlags::MemEltTyDefault:
9018 return getEltType(TypeFlags);
9019 case SVETypeFlags::MemEltTyInt8:
9020 return Builder.getInt8Ty();
9021 case SVETypeFlags::MemEltTyInt16:
9022 return Builder.getInt16Ty();
9023 case SVETypeFlags::MemEltTyInt32:
9024 return Builder.getInt32Ty();
9025 case SVETypeFlags::MemEltTyInt64:
9026 return Builder.getInt64Ty();
9028 llvm_unreachable("Unknown MemEltType");
9031 llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
9032 switch (TypeFlags.getEltType()) {
9033 default:
9034 llvm_unreachable("Invalid SVETypeFlag!");
9036 case SVETypeFlags::EltTyInt8:
9037 return Builder.getInt8Ty();
9038 case SVETypeFlags::EltTyInt16:
9039 return Builder.getInt16Ty();
9040 case SVETypeFlags::EltTyInt32:
9041 return Builder.getInt32Ty();
9042 case SVETypeFlags::EltTyInt64:
9043 return Builder.getInt64Ty();
9044 case SVETypeFlags::EltTyInt128:
9045 return Builder.getInt128Ty();
9047 case SVETypeFlags::EltTyFloat16:
9048 return Builder.getHalfTy();
9049 case SVETypeFlags::EltTyFloat32:
9050 return Builder.getFloatTy();
9051 case SVETypeFlags::EltTyFloat64:
9052 return Builder.getDoubleTy();
9054 case SVETypeFlags::EltTyBFloat16:
9055 return Builder.getBFloatTy();
9057 case SVETypeFlags::EltTyBool8:
9058 case SVETypeFlags::EltTyBool16:
9059 case SVETypeFlags::EltTyBool32:
9060 case SVETypeFlags::EltTyBool64:
9061 return Builder.getInt1Ty();
9065 // Return the llvm predicate vector type corresponding to the element type
9066 // described by TypeFlags.
9067 llvm::ScalableVectorType *
9068 CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
9069 switch (TypeFlags.getEltType()) {
9070 default: llvm_unreachable("Unhandled SVETypeFlag!");
9072 case SVETypeFlags::EltTyInt8:
9073 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9074 case SVETypeFlags::EltTyInt16:
9075 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9076 case SVETypeFlags::EltTyInt32:
9077 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9078 case SVETypeFlags::EltTyInt64:
9079 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9081 case SVETypeFlags::EltTyBFloat16:
9082 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9083 case SVETypeFlags::EltTyFloat16:
9084 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9085 case SVETypeFlags::EltTyFloat32:
9086 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9087 case SVETypeFlags::EltTyFloat64:
9088 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9090 case SVETypeFlags::EltTyBool8:
9091 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9092 case SVETypeFlags::EltTyBool16:
9093 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9094 case SVETypeFlags::EltTyBool32:
9095 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9096 case SVETypeFlags::EltTyBool64:
9097 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9101 // Return the llvm vector type corresponding to the element type described by TypeFlags.
9102 llvm::ScalableVectorType *
9103 CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
9104 switch (TypeFlags.getEltType()) {
9105 default:
9106 llvm_unreachable("Invalid SVETypeFlag!");
9108 case SVETypeFlags::EltTyInt8:
9109 return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
9110 case SVETypeFlags::EltTyInt16:
9111 return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
9112 case SVETypeFlags::EltTyInt32:
9113 return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
9114 case SVETypeFlags::EltTyInt64:
9115 return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
9117 case SVETypeFlags::EltTyFloat16:
9118 return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
9119 case SVETypeFlags::EltTyBFloat16:
9120 return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
9121 case SVETypeFlags::EltTyFloat32:
9122 return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
9123 case SVETypeFlags::EltTyFloat64:
9124 return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
9126 case SVETypeFlags::EltTyBool8:
9127 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
9128 case SVETypeFlags::EltTyBool16:
9129 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
9130 case SVETypeFlags::EltTyBool32:
9131 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
9132 case SVETypeFlags::EltTyBool64:
9133 return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
9137 llvm::Value *
9138 CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
9139 Function *Ptrue =
9140 CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
9141 return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
9144 constexpr unsigned SVEBitsPerBlock = 128;
9146 static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
9147 unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
9148 return llvm::ScalableVectorType::get(EltTy, NumElts);
9151 // Reinterpret the input predicate so that it can be used to correctly isolate
9152 // the elements of the specified datatype.
9153 Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
9154 llvm::ScalableVectorType *VTy) {
9155 auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
9156 if (Pred->getType() == RTy)
9157 return Pred;
9159 unsigned IntID;
9160 llvm::Type *IntrinsicTy;
9161 switch (VTy->getMinNumElements()) {
9162 default:
9163 llvm_unreachable("unsupported element count!");
9164 case 1:
9165 case 2:
9166 case 4:
9167 case 8:
9168 IntID = Intrinsic::aarch64_sve_convert_from_svbool;
9169 IntrinsicTy = RTy;
9170 break;
9171 case 16:
9172 IntID = Intrinsic::aarch64_sve_convert_to_svbool;
9173 IntrinsicTy = Pred->getType();
9174 break;
9177 Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
9178 Value *C = Builder.CreateCall(F, Pred);
9179 assert(C->getType() == RTy && "Unexpected return type!");
9180 return C;
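// Illustrative cast (approximate IR): narrowing an svbool_t predicate for use
// with 64-bit data looks roughly like
//   %p2 = call <vscale x 2 x i1>
//         @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %p)
// while the opposite direction widens through
// llvm.aarch64.sve.convert.to.svbool.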
9183 Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
9184 SmallVectorImpl<Value *> &Ops,
9185 unsigned IntID) {
9186 auto *ResultTy = getSVEType(TypeFlags);
9187 auto *OverloadedTy =
9188 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
9190 // At the ACLE level there's only one predicate type, svbool_t, which is
9191 // mapped to <n x 16 x i1>. However, this might be incompatible with the
9192 // actual type being loaded. For example, when loading 64-bit elements the
9193 // predicate should be <n x 2 x i1> instead. At the IR level the type of
9194 // the predicate and the data being loaded must match. Cast accordingly.
9195 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
9197 Function *F = nullptr;
9198 if (Ops[1]->getType()->isVectorTy())
9199 // This is the "vector base, scalar offset" case. In order to uniquely
9200 // map this built-in to an LLVM IR intrinsic, we need both the return type
9201 // and the type of the vector base.
9202 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
9203 else
9204 // This is the "scalar base, vector offset" case. The type of the offset
9205 // is encoded in the name of the intrinsic. We only need to specify the
9206 // return type in order to uniquely map this built-in to an LLVM IR
9207 // intrinsic.
9208 F = CGM.getIntrinsic(IntID, OverloadedTy);
9210 // Pass 0 when the offset is missing. This can only be applied when using
9211 // the "vector base" addressing mode for which ACLE allows no offset. The
9212 // corresponding LLVM IR always requires an offset.
9213 if (Ops.size() == 2) {
9214 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9215 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9218 // For "vector base, scalar index" scale the index so that it becomes a
9219 // scalar offset.
9220 if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
9221 unsigned BytesPerElt =
9222 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
9223 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9226 Value *Call = Builder.CreateCall(F, Ops);
9228 // The following sext/zext is only needed when ResultTy != OverloadedTy. In
9229 // other cases it's folded into a nop.
9230 return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
9231 : Builder.CreateSExt(Call, ResultTy);
9234 Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
9235 SmallVectorImpl<Value *> &Ops,
9236 unsigned IntID) {
9237 auto *SrcDataTy = getSVEType(TypeFlags);
9238 auto *OverloadedTy =
9239 llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
9241 // In ACLE the source data is passed in the last argument, whereas in LLVM IR
9242 // it's the first argument. Move it accordingly.
9243 Ops.insert(Ops.begin(), Ops.pop_back_val());
9245 Function *F = nullptr;
9246 if (Ops[2]->getType()->isVectorTy())
9247 // This is the "vector base, scalar offset" case. In order to uniquely
9248 // map this built-in to an LLVM IR intrinsic, we need both the return type
9249 // and the type of the vector base.
9250 F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
9251 else
9252 // This is the "scalar base, vector offset" case. The type of the offset
9253 // is encoded in the name of the intrinsic. We only need to specify the
9254 // return type in order to uniquely map this built-in to an LLVM IR
9255 // intrinsic.
9256 F = CGM.getIntrinsic(IntID, OverloadedTy);
9258 // Pass 0 when the offset is missing. This can only be applied when using
9259 // the "vector base" addressing mode for which ACLE allows no offset. The
9260 // corresponding LLVM IR always requires an offset.
9261 if (Ops.size() == 3) {
9262 assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
9263 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9266 // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
9267 // folded into a nop.
9268 Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
9270 // At the ACLE level there's only one predicate type, svbool_t, which is
9271 // mapped to <n x 16 x i1>. However, this might be incompatible with the
9272 // actual type being stored. For example, when storing 64-bit elements the
9273 // predicate should be <n x 2 x i1> instead. At the IR level the type of
9274 // the predicate and the data being stored must match. Cast accordingly.
9275 Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
9277 // For "vector base, scalar index" scale the index so that it becomes a
9278 // scalar offset.
9279 if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
9280 unsigned BytesPerElt =
9281 OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
9282 Ops[3] = Builder.CreateShl(Ops[3], Log2_32(BytesPerElt));
9285 return Builder.CreateCall(F, Ops);
9288 Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
9289 SmallVectorImpl<Value *> &Ops,
9290 unsigned IntID) {
9291 // The gather prefetches are overloaded on the vector input - this can either
9292 // be the vector of base addresses or vector of offsets.
9293 auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
9294 if (!OverloadedTy)
9295 OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
9297 // Cast the predicate from svbool_t to the right number of elements.
9298 Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
9300 // vector + imm addressing modes
9301 if (Ops[1]->getType()->isVectorTy()) {
9302 if (Ops.size() == 3) {
9303 // Pass 0 for 'vector+imm' when the index is omitted.
9304 Ops.push_back(ConstantInt::get(Int64Ty, 0));
9306 // The sv_prfop is the last operand in the builtin and IR intrinsic.
9307 std::swap(Ops[2], Ops[3]);
9308 } else {
9309 // Index needs to be passed as scaled offset.
9310 llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
9311 unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
9312 if (BytesPerElt > 1)
9313 Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
9314 }
9315 }
9317 Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
9318 return Builder.CreateCall(F, Ops);
9319 }
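// Emit an SVE structured load (e.g. svld2 maps to llvm.aarch64.sve.ld2.sret).
// The intrinsic returns N single vectors, which are concatenated here into the
// wide tuple vector that the ACLE builtin returns.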
9321 Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
9322 SmallVectorImpl<Value*> &Ops,
9323 unsigned IntID) {
9324 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
9326 unsigned N;
9327 switch (IntID) {
9328 case Intrinsic::aarch64_sve_ld2_sret:
9329 N = 2;
9330 break;
9331 case Intrinsic::aarch64_sve_ld3_sret:
9332 N = 3;
9333 break;
9334 case Intrinsic::aarch64_sve_ld4_sret:
9335 N = 4;
9336 break;
9337 default:
9338 llvm_unreachable("unknown intrinsic!");
9339 }
9340 auto RetTy = llvm::VectorType::get(VTy->getElementType(),
9341 VTy->getElementCount() * N);
9343 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
9344 Value *BasePtr = Ops[1];
9346 // Does the load have an offset?
9347 if (Ops.size() > 2)
9348 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
9350 Function *F = CGM.getIntrinsic(IntID, {VTy});
9351 Value *Call = Builder.CreateCall(F, {Predicate, BasePtr});
9352 unsigned MinElts = VTy->getMinNumElements();
9353 Value *Ret = llvm::PoisonValue::get(RetTy);
9354 for (unsigned I = 0; I < N; I++) {
9355 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9356 Value *SRet = Builder.CreateExtractValue(Call, I);
9357 Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
9358 }
9359 return Ret;
9360 }
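// Emit an SVE structured store (st2/st3/st4): the ACLE tuple operand is split
// into its N parts, which are passed to the IR intrinsic together with the
// predicate and the base pointer.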
9362 Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
9363 SmallVectorImpl<Value*> &Ops,
9364 unsigned IntID) {
9365 llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
9367 unsigned N;
9368 switch (IntID) {
9369 case Intrinsic::aarch64_sve_st2:
9370 N = 2;
9371 break;
9372 case Intrinsic::aarch64_sve_st3:
9373 N = 3;
9374 break;
9375 case Intrinsic::aarch64_sve_st4:
9376 N = 4;
9377 break;
9378 default:
9379 llvm_unreachable("unknown intrinsic!");
9380 }
9382 Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
9383 Value *BasePtr = Ops[1];
9385 // Does the store have an offset?
9386 if (Ops.size() > 3)
9387 BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
9389 Value *Val = Ops.back();
9391 // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
9392 // need to break up the tuple vector.
9393 SmallVector<llvm::Value*, 5> Operands;
9394 unsigned MinElts = VTy->getElementCount().getKnownMinValue();
9395 for (unsigned I = 0; I < N; ++I) {
9396 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9397 Operands.push_back(Builder.CreateExtractVector(VTy, Val, Idx));
9398 }
9399 Operands.append({Predicate, BasePtr});
9401 Function *F = CGM.getIntrinsic(IntID, { VTy });
9402 return Builder.CreateCall(F, Operands);
9403 }
9405 // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
9406 // svpmullt_pair intrinsics, with the exception that their results are bitcast
9407 // to a wider type.
9408 Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
9409 SmallVectorImpl<Value *> &Ops,
9410 unsigned BuiltinID) {
9411 // Splat scalar operand to vector (intrinsics with _n infix)
9412 if (TypeFlags.hasSplatOperand()) {
9413 unsigned OpNo = TypeFlags.getSplatOperand();
9414 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
9415 }
9417 // The pair-wise function has a narrower overloaded type.
9418 Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
9419 Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
9421 // Now bitcast to the wider result type.
9422 llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
9423 return EmitSVEReinterpret(Call, Ty);
9424 }
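// Emit svmovlb/svmovlt as a shift-left-long (ushllb/sshllb/ushllt/sshllt)
// with an immediate shift of 0.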
9426 Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
9427 ArrayRef<Value *> Ops, unsigned BuiltinID) {
9428 llvm::Type *OverloadedTy = getSVEType(TypeFlags);
9429 Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
9430 return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
9431 }
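// Emit an SVE contiguous prefetch builtin: cast the predicate to match the
// prefetched element type and fold an optional index into the base pointer.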
9433 Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
9434 SmallVectorImpl<Value *> &Ops,
9435 unsigned BuiltinID) {
9436 auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
9437 auto *VectorTy = getSVEVectorForElementType(MemEltTy);
9438 auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9440 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9441 Value *BasePtr = Ops[1];
9443 // Fold the index operand, if present, into the base pointer.
9444 if (Ops.size() > 3)
9445 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9447 Value *PrfOp = Ops.back();
9449 Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
9450 return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
9451 }
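// Emit an SVE predicated contiguous load. The result is zero- or sign-extended
// when the memory element type is narrower than the returned element type, and
// the access is annotated with TBAA metadata.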
9453 Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
9454 llvm::Type *ReturnTy,
9455 SmallVectorImpl<Value *> &Ops,
9456 unsigned BuiltinID,
9457 bool IsZExtReturn) {
9458 QualType LangPTy = E->getArg(1)->getType();
9459 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
9460 LangPTy->castAs<PointerType>()->getPointeeType());
9462 // The vector type that is returned may be different from the
9463 // eventual type loaded from memory.
9464 auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
9465 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9467 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9468 Value *BasePtr = Ops[1];
9470 // Does the load have an offset?
9471 if (Ops.size() > 2)
9472 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9474 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
9475 auto *Load =
9476 cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
9477 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
9478 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
9480 return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
9481 : Builder.CreateSExt(Load, VectorTy);
9482 }
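// Emit an SVE predicated contiguous store, truncating the stored value to the
// memory element type when it is narrower than the register element type.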
9484 Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
9485 SmallVectorImpl<Value *> &Ops,
9486 unsigned BuiltinID) {
9487 QualType LangPTy = E->getArg(1)->getType();
9488 llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
9489 LangPTy->castAs<PointerType>()->getPointeeType());
9491 // The vector type that is stored may be different from the
9492 // eventual type stored to memory.
9493 auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
9494 auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
9496 Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
9497 Value *BasePtr = Ops[1];
9499 // Does the store have an offset?
9500 if (Ops.size() == 4)
9501 BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
9503 // Last value is always the data
9504 llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
9506 Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
9507 auto *Store =
9508 cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
9509 auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
9510 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
9511 return Store;
9512 }
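// Compute an SME ZA tile-slice index as Base + Offset.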
9514 Value *CodeGenFunction::EmitTileslice(Value *Offset, Value *Base) {
9515 llvm::Value *CastOffset = Builder.CreateIntCast(Offset, Int32Ty, false);
9516 return Builder.CreateAdd(Base, CastOffset, "tileslice");
9517 }
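// Emit an SME ld1/st1 builtin that moves data between memory and a ZA tile
// slice. If a vnum argument is present, the base pointer is advanced by
// vnum * svl bytes, where svl is the streaming vector length from cntsb.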
9519 Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
9520 SmallVectorImpl<Value *> &Ops,
9521 unsigned IntID) {
9522 Ops[3] = EmitSVEPredicateCast(
9523 Ops[3], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));
9525 SmallVector<Value *> NewOps;
9526 NewOps.push_back(Ops[3]);
9528 llvm::Value *BasePtr = Ops[4];
9530 // If the intrinsic contains the vnum parameter, multiply it by the vector
9531 // size in bytes.
9532 if (Ops.size() == 6) {
9533 Function *StreamingVectorLength =
9534 CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
9535 llvm::Value *StreamingVectorLengthCall =
9536 Builder.CreateCall(StreamingVectorLength);
9537 llvm::Value *Mulvl =
9538 Builder.CreateMul(StreamingVectorLengthCall, Ops[5], "mulvl");
9539 // The type of the ptr parameter is void *, so use Int8Ty here.
9540 BasePtr = Builder.CreateGEP(Int8Ty, Ops[4], Mulvl);
9541 }
9542 NewOps.push_back(BasePtr);
9543 NewOps.push_back(Ops[0]);
9544 NewOps.push_back(EmitTileslice(Ops[2], Ops[1]));
9545 Function *F = CGM.getIntrinsic(IntID);
9546 return Builder.CreateCall(F, NewOps);
9547 }
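// Emit the SME ZA read/write builtins, which move a vector between a ZA tile
// slice and an SVE vector register.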
9549 Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
9550 SmallVectorImpl<Value *> &Ops,
9551 unsigned IntID) {
9552 auto *VecTy = getSVEType(TypeFlags);
9553 Function *F = CGM.getIntrinsic(IntID, VecTy);
9554 if (TypeFlags.isReadZA()) {
9555 Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
9556 Ops[3] = EmitTileslice(Ops[4], Ops[3]);
9557 Ops.erase(&Ops[4]);
9558 } else if (TypeFlags.isWriteZA()) {
9559 Ops[1] = EmitTileslice(Ops[2], Ops[1]);
9560 Ops[2] = EmitSVEPredicateCast(Ops[3], VecTy);
9561 Ops.erase(&Ops[3]);
9562 }
9563 return Builder.CreateCall(F, Ops);
9564 }
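// Emit svzero_mask_za/svzero_za; a missing mask operand means "zero all of ZA"
// and is expanded to the immediate 255 below.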
9566 Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
9567 SmallVectorImpl<Value *> &Ops,
9568 unsigned IntID) {
9569 // The svzero_za() intrinsic zeros the entire ZA tile and has no parameters.
9570 if (Ops.size() == 0)
9571 Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
9572 Function *F = CGM.getIntrinsic(IntID, {});
9573 return Builder.CreateCall(F, Ops);
9574 }
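// Emit the SME svldr_vnum_za/svstr_vnum_za builtins: the base pointer is
// advanced by vnum * svl bytes and vnum is folded into the tile-slice index.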
9576 Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
9577 SmallVectorImpl<Value *> &Ops,
9578 unsigned IntID) {
9579 Function *Cntsb = CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
9580 llvm::Value *CntsbCall = Builder.CreateCall(Cntsb, {}, "svlb");
9581 llvm::Value *MulVL = Builder.CreateMul(
9582 CntsbCall,
9583 Builder.getInt64(cast<llvm::ConstantInt>(Ops[1])->getZExtValue()),
9584 "mulvl");
9585 Ops[2] = Builder.CreateGEP(Int8Ty, Ops[2], MulVL);
9586 Ops[0] = EmitTileslice(Ops[1], Ops[0]);
9587 Ops.erase(&Ops[1]);
9588 Function *F = CGM.getIntrinsic(IntID, {});
9589 return Builder.CreateCall(F, Ops);
9590 }
9592 // Limit the usage of scalable llvm IR generated by the ACLE by using the
9593 // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
9594 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
9595 return Builder.CreateVectorSplat(
9596 cast<llvm::VectorType>(Ty)->getElementCount(), Scalar);
9597 }
9599 Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
9600 return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
9601 }
9603 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
9604 // FIXME: For big endian this needs an additional REV, or needs a separate
9605 // intrinsic that is code-generated as a no-op, because the LLVM bitcast
9606 // instruction is defined as 'bitwise' equivalent from memory point of
9607 // view (when storing/reloading), whereas the svreinterpret builtin
9608 // implements bitwise equivalent cast from register point of view.
9609 // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
9610 return Builder.CreateBitCast(Val, Ty);
9611 }
9613 static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
9614 SmallVectorImpl<Value *> &Ops) {
9615 auto *SplatZero = Constant::getNullValue(Ty);
9616 Ops.insert(Ops.begin(), SplatZero);
9617 }
9619 static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
9620 SmallVectorImpl<Value *> &Ops) {
9621 auto *SplatUndef = UndefValue::get(Ty);
9622 Ops.insert(Ops.begin(), SplatUndef);
9623 }
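// Return the overload types used to select the correct polymorphic LLVM
// intrinsic for an SVE/SME builtin, based on its type flags.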
9625 SmallVector<llvm::Type *, 2>
9626 CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
9627 llvm::Type *ResultType,
9628 ArrayRef<Value *> Ops) {
9629 if (TypeFlags.isOverloadNone())
9630 return {};
9632 llvm::Type *DefaultType = getSVEType(TypeFlags);
9634 if (TypeFlags.isOverloadWhile())
9635 return {DefaultType, Ops[1]->getType()};
9637 if (TypeFlags.isOverloadWhileRW())
9638 return {getSVEPredType(TypeFlags), Ops[0]->getType()};
9640 if (TypeFlags.isOverloadCvt())
9641 return {Ops[0]->getType(), Ops.back()->getType()};
9643 assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
9644 return {DefaultType};
9645 }
9647 Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
9648 llvm::Type *Ty,
9649 ArrayRef<Value *> Ops) {
9650 assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) &&
9651 "Expects TypeFlags.isTupleSet() or TypeFlags.isTupleGet()");
9653 unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
9654 auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>(
9655 TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
9656 Value *Idx = ConstantInt::get(CGM.Int64Ty,
9657 I * SingleVecTy->getMinNumElements());
9659 if (TypeFlags.isTupleSet())
9660 return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx);
9661 return Builder.CreateExtractVector(Ty, Ops[0], Idx);
9662 }
9664 Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
9665 llvm::Type *Ty,
9666 ArrayRef<Value *> Ops) {
9667 assert(TypeFlags.isTupleCreate() && "Expects TypeFlags.isTupleCreate()");
9669 auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
9670 unsigned MinElts = SrcTy->getMinNumElements();
9671 Value *Call = llvm::PoisonValue::get(Ty);
9672 for (unsigned I = 0; I < Ops.size(); I++) {
9673 Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
9674 Call = Builder.CreateInsertVector(Ty, Call, Ops[I], Idx);
9675 }
9677 return Call;
9678 }
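// Main entry point for lowering AArch64 SVE builtins. Builtins with a direct
// LLVM intrinsic mapping are handled table-driven via AArch64SVEIntrinsicMap;
// the remaining ones are expanded case by case in the switch below.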
9680 Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
9681 const CallExpr *E) {
9682 // Find out if any arguments are required to be integer constant expressions.
9683 unsigned ICEArguments = 0;
9684 ASTContext::GetBuiltinTypeError Error;
9685 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9686 assert(Error == ASTContext::GE_None && "Should not codegen an error");
9688 llvm::Type *Ty = ConvertType(E->getType());
9689 if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
9690 BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
9691 Value *Val = EmitScalarExpr(E->getArg(0));
9692 return EmitSVEReinterpret(Val, Ty);
9693 }
9695 llvm::SmallVector<Value *, 4> Ops;
9696 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
9697 if ((ICEArguments & (1 << i)) == 0)
9698 Ops.push_back(EmitScalarExpr(E->getArg(i)));
9699 else {
9700 // If this is required to be a constant, constant fold it so that we know
9701 // that the generated intrinsic gets a ConstantInt.
9702 std::optional<llvm::APSInt> Result =
9703 E->getArg(i)->getIntegerConstantExpr(getContext());
9704 assert(Result && "Expected argument to be a constant");
9706 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
9707 // truncate because the immediate has been range checked and no valid
9708 // immediate requires more than a handful of bits.
9709 *Result = Result->extOrTrunc(32);
9710 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
9711 }
9712 }
9714 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
9715 AArch64SVEIntrinsicsProvenSorted);
9716 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9717 if (TypeFlags.isLoad())
9718 return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
9719 TypeFlags.isZExtReturn());
9720 else if (TypeFlags.isStore())
9721 return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
9722 else if (TypeFlags.isGatherLoad())
9723 return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9724 else if (TypeFlags.isScatterStore())
9725 return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9726 else if (TypeFlags.isPrefetch())
9727 return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9728 else if (TypeFlags.isGatherPrefetch())
9729 return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9730 else if (TypeFlags.isStructLoad())
9731 return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9732 else if (TypeFlags.isStructStore())
9733 return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9734 else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
9735 return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops);
9736 else if (TypeFlags.isTupleCreate())
9737 return EmitSVETupleCreate(TypeFlags, Ty, Ops);
9738 else if (TypeFlags.isUndef())
9739 return UndefValue::get(Ty);
9740 else if (Builtin->LLVMIntrinsic != 0) {
9741 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
9742 InsertExplicitZeroOperand(Builder, Ty, Ops);
9744 if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
9745 InsertExplicitUndefOperand(Builder, Ty, Ops);
9747 // Some ACLE builtins leave out the argument to specify the predicate
9748 // pattern, which is expected to be expanded to an SV_ALL pattern.
9749 if (TypeFlags.isAppendSVALL())
9750 Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
9751 if (TypeFlags.isInsertOp1SVALL())
9752 Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
9754 // Predicates must match the main datatype.
9755 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
9756 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
9757 if (PredTy->getElementType()->isIntegerTy(1))
9758 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
9760 // Splat scalar operand to vector (intrinsics with _n infix)
9761 if (TypeFlags.hasSplatOperand()) {
9762 unsigned OpNo = TypeFlags.getSplatOperand();
9763 Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
9764 }
9766 if (TypeFlags.isReverseCompare())
9767 std::swap(Ops[1], Ops[2]);
9768 else if (TypeFlags.isReverseUSDOT())
9769 std::swap(Ops[1], Ops[2]);
9770 else if (TypeFlags.isReverseMergeAnyBinOp() &&
9771 TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
9772 std::swap(Ops[1], Ops[2]);
9773 else if (TypeFlags.isReverseMergeAnyAccOp() &&
9774 TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
9775 std::swap(Ops[1], Ops[3]);
9777 // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
9778 if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
9779 llvm::Type *OpndTy = Ops[1]->getType();
9780 auto *SplatZero = Constant::getNullValue(OpndTy);
9781 Ops[1] = Builder.CreateSelect(Ops[0], Ops[1], SplatZero);
9782 }
9784 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
9785 getSVEOverloadTypes(TypeFlags, Ty, Ops));
9786 Value *Call = Builder.CreateCall(F, Ops);
9788 // Predicate results must be converted to svbool_t.
9789 if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
9790 if (PredTy->getScalarType()->isIntegerTy(1))
9791 Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9793 return Call;
9794 }
9796 switch (BuiltinID) {
9797 default:
9798 return nullptr;
9800 case SVE::BI__builtin_sve_svmov_b_z: {
9801 // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
9802 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9803 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9804 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
9805 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
9806 }
9808 case SVE::BI__builtin_sve_svnot_b_z: {
9809 // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
9810 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9811 llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9812 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
9813 return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
9814 }
9816 case SVE::BI__builtin_sve_svmovlb_u16:
9817 case SVE::BI__builtin_sve_svmovlb_u32:
9818 case SVE::BI__builtin_sve_svmovlb_u64:
9819 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
9821 case SVE::BI__builtin_sve_svmovlb_s16:
9822 case SVE::BI__builtin_sve_svmovlb_s32:
9823 case SVE::BI__builtin_sve_svmovlb_s64:
9824 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
9826 case SVE::BI__builtin_sve_svmovlt_u16:
9827 case SVE::BI__builtin_sve_svmovlt_u32:
9828 case SVE::BI__builtin_sve_svmovlt_u64:
9829 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
9831 case SVE::BI__builtin_sve_svmovlt_s16:
9832 case SVE::BI__builtin_sve_svmovlt_s32:
9833 case SVE::BI__builtin_sve_svmovlt_s64:
9834 return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
9836 case SVE::BI__builtin_sve_svpmullt_u16:
9837 case SVE::BI__builtin_sve_svpmullt_u64:
9838 case SVE::BI__builtin_sve_svpmullt_n_u16:
9839 case SVE::BI__builtin_sve_svpmullt_n_u64:
9840 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
9842 case SVE::BI__builtin_sve_svpmullb_u16:
9843 case SVE::BI__builtin_sve_svpmullb_u64:
9844 case SVE::BI__builtin_sve_svpmullb_n_u16:
9845 case SVE::BI__builtin_sve_svpmullb_n_u64:
9846 return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
9848 case SVE::BI__builtin_sve_svdup_n_b8:
9849 case SVE::BI__builtin_sve_svdup_n_b16:
9850 case SVE::BI__builtin_sve_svdup_n_b32:
9851 case SVE::BI__builtin_sve_svdup_n_b64: {
9852 Value *CmpNE =
9853 Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
9854 llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
9855 Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
9856 return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
9857 }
9859 case SVE::BI__builtin_sve_svdupq_n_b8:
9860 case SVE::BI__builtin_sve_svdupq_n_b16:
9861 case SVE::BI__builtin_sve_svdupq_n_b32:
9862 case SVE::BI__builtin_sve_svdupq_n_b64:
9863 case SVE::BI__builtin_sve_svdupq_n_u8:
9864 case SVE::BI__builtin_sve_svdupq_n_s8:
9865 case SVE::BI__builtin_sve_svdupq_n_u64:
9866 case SVE::BI__builtin_sve_svdupq_n_f64:
9867 case SVE::BI__builtin_sve_svdupq_n_s64:
9868 case SVE::BI__builtin_sve_svdupq_n_u16:
9869 case SVE::BI__builtin_sve_svdupq_n_f16:
9870 case SVE::BI__builtin_sve_svdupq_n_bf16:
9871 case SVE::BI__builtin_sve_svdupq_n_s16:
9872 case SVE::BI__builtin_sve_svdupq_n_u32:
9873 case SVE::BI__builtin_sve_svdupq_n_f32:
9874 case SVE::BI__builtin_sve_svdupq_n_s32: {
9875 // These builtins are implemented by building a fixed-length vector from the
9876 // operands and using dupq_lane to replicate its first 128 bits across the result.
9877 unsigned NumOpnds = Ops.size();
9879 bool IsBoolTy =
9880 cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
9882 // For svdupq_n_b* the element type is an integer of width 128/numelts,
9883 // so that the compare can use the width that is natural for the expected
9884 // number of predicate lanes.
9885 llvm::Type *EltTy = Ops[0]->getType();
9886 if (IsBoolTy)
9887 EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
9889 SmallVector<llvm::Value *, 16> VecOps;
9890 for (unsigned I = 0; I < NumOpnds; ++I)
9891 VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
9892 Value *Vec = BuildVector(VecOps);
9894 llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
9895 Value *InsertSubVec = Builder.CreateInsertVector(
9896 OverloadedTy, PoisonValue::get(OverloadedTy), Vec, Builder.getInt64(0));
9898 Function *F =
9899 CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
9900 Value *DupQLane =
9901 Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
9903 if (!IsBoolTy)
9904 return DupQLane;
9906 SVETypeFlags TypeFlags(Builtin->TypeModifier);
9907 Value *Pred = EmitSVEAllTruePred(TypeFlags);
9909 // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
9910 F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
9911 : Intrinsic::aarch64_sve_cmpne_wide,
9912 OverloadedTy);
9913 Value *Call = Builder.CreateCall(
9914 F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
9915 return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9916 }
9918 case SVE::BI__builtin_sve_svpfalse_b:
9919 return ConstantInt::getFalse(Ty);
9921 case SVE::BI__builtin_sve_svlen_bf16:
9922 case SVE::BI__builtin_sve_svlen_f16:
9923 case SVE::BI__builtin_sve_svlen_f32:
9924 case SVE::BI__builtin_sve_svlen_f64:
9925 case SVE::BI__builtin_sve_svlen_s8:
9926 case SVE::BI__builtin_sve_svlen_s16:
9927 case SVE::BI__builtin_sve_svlen_s32:
9928 case SVE::BI__builtin_sve_svlen_s64:
9929 case SVE::BI__builtin_sve_svlen_u8:
9930 case SVE::BI__builtin_sve_svlen_u16:
9931 case SVE::BI__builtin_sve_svlen_u32:
9932 case SVE::BI__builtin_sve_svlen_u64: {
9933 SVETypeFlags TF(Builtin->TypeModifier);
9934 auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9935 auto *NumEls =
9936 llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
9938 Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
9939 return Builder.CreateMul(NumEls, Builder.CreateCall(F));
9940 }
9942 case SVE::BI__builtin_sve_svtbl2_u8:
9943 case SVE::BI__builtin_sve_svtbl2_s8:
9944 case SVE::BI__builtin_sve_svtbl2_u16:
9945 case SVE::BI__builtin_sve_svtbl2_s16:
9946 case SVE::BI__builtin_sve_svtbl2_u32:
9947 case SVE::BI__builtin_sve_svtbl2_s32:
9948 case SVE::BI__builtin_sve_svtbl2_u64:
9949 case SVE::BI__builtin_sve_svtbl2_s64:
9950 case SVE::BI__builtin_sve_svtbl2_f16:
9951 case SVE::BI__builtin_sve_svtbl2_bf16:
9952 case SVE::BI__builtin_sve_svtbl2_f32:
9953 case SVE::BI__builtin_sve_svtbl2_f64: {
9954 SVETypeFlags TF(Builtin->TypeModifier);
9955 auto VTy = cast<llvm::ScalableVectorType>(getSVEType(TF));
9956 Value *V0 = Builder.CreateExtractVector(VTy, Ops[0],
9957 ConstantInt::get(CGM.Int64Ty, 0));
9958 unsigned MinElts = VTy->getMinNumElements();
9959 Value *V1 = Builder.CreateExtractVector(
9960 VTy, Ops[0], ConstantInt::get(CGM.Int64Ty, MinElts));
9961 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
9962 return Builder.CreateCall(F, {V0, V1, Ops[1]});
9963 }
9965 case SVE::BI__builtin_sve_svset_neonq_s8:
9966 case SVE::BI__builtin_sve_svset_neonq_s16:
9967 case SVE::BI__builtin_sve_svset_neonq_s32:
9968 case SVE::BI__builtin_sve_svset_neonq_s64:
9969 case SVE::BI__builtin_sve_svset_neonq_u8:
9970 case SVE::BI__builtin_sve_svset_neonq_u16:
9971 case SVE::BI__builtin_sve_svset_neonq_u32:
9972 case SVE::BI__builtin_sve_svset_neonq_u64:
9973 case SVE::BI__builtin_sve_svset_neonq_f16:
9974 case SVE::BI__builtin_sve_svset_neonq_f32:
9975 case SVE::BI__builtin_sve_svset_neonq_f64:
9976 case SVE::BI__builtin_sve_svset_neonq_bf16: {
9977 return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
9978 }
9980 case SVE::BI__builtin_sve_svget_neonq_s8:
9981 case SVE::BI__builtin_sve_svget_neonq_s16:
9982 case SVE::BI__builtin_sve_svget_neonq_s32:
9983 case SVE::BI__builtin_sve_svget_neonq_s64:
9984 case SVE::BI__builtin_sve_svget_neonq_u8:
9985 case SVE::BI__builtin_sve_svget_neonq_u16:
9986 case SVE::BI__builtin_sve_svget_neonq_u32:
9987 case SVE::BI__builtin_sve_svget_neonq_u64:
9988 case SVE::BI__builtin_sve_svget_neonq_f16:
9989 case SVE::BI__builtin_sve_svget_neonq_f32:
9990 case SVE::BI__builtin_sve_svget_neonq_f64:
9991 case SVE::BI__builtin_sve_svget_neonq_bf16: {
9992 return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
9993 }
9995 case SVE::BI__builtin_sve_svdup_neonq_s8:
9996 case SVE::BI__builtin_sve_svdup_neonq_s16:
9997 case SVE::BI__builtin_sve_svdup_neonq_s32:
9998 case SVE::BI__builtin_sve_svdup_neonq_s64:
9999 case SVE::BI__builtin_sve_svdup_neonq_u8:
10000 case SVE::BI__builtin_sve_svdup_neonq_u16:
10001 case SVE::BI__builtin_sve_svdup_neonq_u32:
10002 case SVE::BI__builtin_sve_svdup_neonq_u64:
10003 case SVE::BI__builtin_sve_svdup_neonq_f16:
10004 case SVE::BI__builtin_sve_svdup_neonq_f32:
10005 case SVE::BI__builtin_sve_svdup_neonq_f64:
10006 case SVE::BI__builtin_sve_svdup_neonq_bf16: {
10007 Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
10008 Builder.getInt64(0));
10009 return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
10010 {Insert, Builder.getInt64(0)});
10011 }
10012 }
10014 /// Should not happen
10015 return nullptr;
10016 }
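// Main entry point for lowering AArch64 SME builtins, driven by
// AArch64SMEIntrinsicMap.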
10018 Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
10019 const CallExpr *E) {
10020 // Find out if any arguments are required to be integer constant expressions.
10021 unsigned ICEArguments = 0;
10022 ASTContext::GetBuiltinTypeError Error;
10023 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
10024 assert(Error == ASTContext::GE_None && "Should not codegen an error");
10026 llvm::Type *Ty = ConvertType(E->getType());
10027 llvm::SmallVector<Value *, 4> Ops;
10028 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
10029 if ((ICEArguments & (1 << i)) == 0)
10030 Ops.push_back(EmitScalarExpr(E->getArg(i)));
10031 else {
10032 // If this is required to be a constant, constant fold it so that we know
10033 // that the generated intrinsic gets a ConstantInt.
10034 std::optional<llvm::APSInt> Result =
10035 E->getArg(i)->getIntegerConstantExpr(getContext());
10036 assert(Result && "Expected argument to be a constant");
10038 // Immediates for SVE llvm intrinsics are always 32bit. We can safely
10039 // truncate because the immediate has been range checked and no valid
10040 // immediate requires more than a handful of bits.
10041 *Result = Result->extOrTrunc(32);
10042 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
10043 }
10044 }
10046 auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID,
10047 AArch64SMEIntrinsicsProvenSorted);
10048 SVETypeFlags TypeFlags(Builtin->TypeModifier);
10049 if (TypeFlags.isLoad() || TypeFlags.isStore())
10050 return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10051 else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
10052 return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10053 else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
10054 BuiltinID == SME::BI__builtin_sme_svzero_za)
10055 return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10056 else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
10057 BuiltinID == SME::BI__builtin_sme_svstr_vnum_za)
10058 return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
10059 else if (Builtin->LLVMIntrinsic != 0) {
10060 // Predicates must match the main datatype.
10061 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
10062 if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
10063 if (PredTy->getElementType()->isIntegerTy(1))
10064 Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
10066 Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
10067 getSVEOverloadTypes(TypeFlags, Ty, Ops));
10068 Value *Call = Builder.CreateCall(F, Ops);
10069 return Call;
10070 }
10072 /// Should not happen
10073 return nullptr;
10074 }
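// Main entry point for lowering AArch64 builtins: SVE and SME builtins are
// dispatched to the dedicated emitters above; scalar, system-register, MTE,
// CRC32 and NEON builtins are handled below.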
10076 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
10077 const CallExpr *E,
10078 llvm::Triple::ArchType Arch) {
10079 if (BuiltinID >= clang::AArch64::FirstSVEBuiltin &&
10080 BuiltinID <= clang::AArch64::LastSVEBuiltin)
10081 return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
10083 if (BuiltinID >= clang::AArch64::FirstSMEBuiltin &&
10084 BuiltinID <= clang::AArch64::LastSMEBuiltin)
10085 return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
10087 unsigned HintID = static_cast<unsigned>(-1);
10088 switch (BuiltinID) {
10089 default: break;
10090 case clang::AArch64::BI__builtin_arm_nop:
10091 HintID = 0;
10092 break;
10093 case clang::AArch64::BI__builtin_arm_yield:
10094 case clang::AArch64::BI__yield:
10095 HintID = 1;
10096 break;
10097 case clang::AArch64::BI__builtin_arm_wfe:
10098 case clang::AArch64::BI__wfe:
10099 HintID = 2;
10100 break;
10101 case clang::AArch64::BI__builtin_arm_wfi:
10102 case clang::AArch64::BI__wfi:
10103 HintID = 3;
10104 break;
10105 case clang::AArch64::BI__builtin_arm_sev:
10106 case clang::AArch64::BI__sev:
10107 HintID = 4;
10108 break;
10109 case clang::AArch64::BI__builtin_arm_sevl:
10110 case clang::AArch64::BI__sevl:
10111 HintID = 5;
10112 break;
10113 }
10115 if (HintID != static_cast<unsigned>(-1)) {
10116 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
10117 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
10118 }
10120 if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
10121 assert((getContext().getTypeSize(E->getType()) == 32) &&
10122 "rbit of unusual size!");
10123 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10124 return Builder.CreateCall(
10125 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
10126 }
10127 if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
10128 assert((getContext().getTypeSize(E->getType()) == 64) &&
10129 "rbit of unusual size!");
10130 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10131 return Builder.CreateCall(
10132 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
10133 }
10135 if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
10136 BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
10137 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10138 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
10139 Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
10140 if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
10141 Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
10142 return Res;
10143 }
10145 if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
10146 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10147 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
10148 "cls");
10149 }
10150 if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
10151 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10152 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
10153 "cls");
10154 }
10156 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
10157 BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
10158 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10159 llvm::Type *Ty = Arg->getType();
10160 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
10161 Arg, "frint32z");
10162 }
10164 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
10165 BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
10166 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10167 llvm::Type *Ty = Arg->getType();
10168 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
10169 Arg, "frint64z");
10170 }
10172 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
10173 BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
10174 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10175 llvm::Type *Ty = Arg->getType();
10176 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
10177 Arg, "frint32x");
10178 }
10180 if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
10181 BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
10182 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10183 llvm::Type *Ty = Arg->getType();
10184 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
10185 Arg, "frint64x");
10186 }
10188 if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
10189 assert((getContext().getTypeSize(E->getType()) == 32) &&
10190 "__jcvt of unusual size!");
10191 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
10192 return Builder.CreateCall(
10193 CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
10194 }
10196 if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
10197 BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
10198 BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
10199 BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
10200 llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
10201 llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
10203 if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
10204 // Load from the address via an LLVM intrinsic, receiving a
10205 // tuple of 8 i64 words, and store each one to ValPtr.
10206 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
10207 llvm::Value *Val = Builder.CreateCall(F, MemAddr);
10208 llvm::Value *ToRet;
10209 for (size_t i = 0; i < 8; i++) {
10210 llvm::Value *ValOffsetPtr =
10211 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
10212 Address Addr =
10213 Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
10214 ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
10215 }
10216 return ToRet;
10217 } else {
10218 // Load 8 i64 words from ValPtr, and store them to the address
10219 // via an LLVM intrinsic.
10220 SmallVector<llvm::Value *, 9> Args;
10221 Args.push_back(MemAddr);
10222 for (size_t i = 0; i < 8; i++) {
10223 llvm::Value *ValOffsetPtr =
10224 Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
10225 Address Addr =
10226 Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
10227 Args.push_back(Builder.CreateLoad(Addr));
10228 }
10230 auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
10231 ? Intrinsic::aarch64_st64b
10232 : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
10233 ? Intrinsic::aarch64_st64bv
10234 : Intrinsic::aarch64_st64bv0);
10235 Function *F = CGM.getIntrinsic(Intr);
10236 return Builder.CreateCall(F, Args);
10237 }
10238 }
10240 if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
10241 BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
10243 auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
10244 ? Intrinsic::aarch64_rndr
10245 : Intrinsic::aarch64_rndrrs);
10246 Function *F = CGM.getIntrinsic(Intr);
10247 llvm::Value *Val = Builder.CreateCall(F);
10248 Value *RandomValue = Builder.CreateExtractValue(Val, 0);
10249 Value *Status = Builder.CreateExtractValue(Val, 1);
10251 Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
10252 Builder.CreateStore(RandomValue, MemAddress);
10253 Status = Builder.CreateZExt(Status, Int32Ty);
10254 return Status;
10255 }
10257 if (BuiltinID == clang::AArch64::BI__clear_cache) {
10258 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
10259 const FunctionDecl *FD = E->getDirectCallee();
10260 Value *Ops[2];
10261 for (unsigned i = 0; i < 2; i++)
10262 Ops[i] = EmitScalarExpr(E->getArg(i));
10263 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
10264 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
10265 StringRef Name = FD->getName();
10266 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
10267 }
10269 if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
10270 BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
10271 getContext().getTypeSize(E->getType()) == 128) {
10272 Function *F =
10273 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
10274 ? Intrinsic::aarch64_ldaxp
10275 : Intrinsic::aarch64_ldxp);
10277 Value *LdPtr = EmitScalarExpr(E->getArg(0));
10278 Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");
10280 Value *Val0 = Builder.CreateExtractValue(Val, 1);
10281 Value *Val1 = Builder.CreateExtractValue(Val, 0);
10282 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
10283 Val0 = Builder.CreateZExt(Val0, Int128Ty);
10284 Val1 = Builder.CreateZExt(Val1, Int128Ty);
10286 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
10287 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
10288 Val = Builder.CreateOr(Val, Val1);
10289 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
10290 } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
10291 BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
10292 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
10294 QualType Ty = E->getType();
10295 llvm::Type *RealResTy = ConvertType(Ty);
10296 llvm::Type *IntTy =
10297 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
10298 llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
10300 Function *F =
10301 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
10302 ? Intrinsic::aarch64_ldaxr
10303 : Intrinsic::aarch64_ldxr,
10304 PtrTy);
10305 CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
10306 Val->addParamAttr(
10307 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
10309 if (RealResTy->isPointerTy())
10310 return Builder.CreateIntToPtr(Val, RealResTy);
10312 llvm::Type *IntResTy = llvm::IntegerType::get(
10313 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
10314 return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
10315 RealResTy);
10316 }
10318 if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
10319 BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
10320 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
10321 Function *F =
10322 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
10323 ? Intrinsic::aarch64_stlxp
10324 : Intrinsic::aarch64_stxp);
10325 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
10327 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
10328 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
10330 Tmp = Tmp.withElementType(STy);
10331 llvm::Value *Val = Builder.CreateLoad(Tmp);
10333 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
10334 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
10335 Value *StPtr = EmitScalarExpr(E->getArg(1));
10336 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
10337 }
10339 if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
10340 BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
10341 Value *StoreVal = EmitScalarExpr(E->getArg(0));
10342 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
10344 QualType Ty = E->getArg(0)->getType();
10345 llvm::Type *StoreTy =
10346 llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
10348 if (StoreVal->getType()->isPointerTy())
10349 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
10350 else {
10351 llvm::Type *IntTy = llvm::IntegerType::get(
10352 getLLVMContext(),
10353 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
10354 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
10355 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
10356 }
10358 Function *F =
10359 CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
10360 ? Intrinsic::aarch64_stlxr
10361 : Intrinsic::aarch64_stxr,
10362 StoreAddr->getType());
10363 CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
10364 CI->addParamAttr(
10365 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
10366 return CI;
10367 }
10369 if (BuiltinID == clang::AArch64::BI__getReg) {
10370 Expr::EvalResult Result;
10371 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
10372 llvm_unreachable("Sema will ensure that the parameter is constant");
10374 llvm::APSInt Value = Result.Val.getInt();
10375 LLVMContext &Context = CGM.getLLVMContext();
10376 std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
10378 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
10379 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10380 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10382 llvm::Function *F =
10383 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10384 return Builder.CreateCall(F, Metadata);
10385 }
10387 if (BuiltinID == clang::AArch64::BI__break) {
10388 Expr::EvalResult Result;
10389 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
10390 llvm_unreachable("Sema will ensure that the parameter is constant");
10392 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
10393 return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
10394 }
10396 if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
10397 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
10398 return Builder.CreateCall(F);
10399 }
10401 if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
10402 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
10403 llvm::SyncScope::SingleThread);
10405 // CRC32
10406 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
10407 switch (BuiltinID) {
10408 case clang::AArch64::BI__builtin_arm_crc32b:
10409 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
10410 case clang::AArch64::BI__builtin_arm_crc32cb:
10411 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
10412 case clang::AArch64::BI__builtin_arm_crc32h:
10413 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
10414 case clang::AArch64::BI__builtin_arm_crc32ch:
10415 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
10416 case clang::AArch64::BI__builtin_arm_crc32w:
10417 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
10418 case clang::AArch64::BI__builtin_arm_crc32cw:
10419 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
10420 case clang::AArch64::BI__builtin_arm_crc32d:
10421 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
10422 case clang::AArch64::BI__builtin_arm_crc32cd:
10423 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
10424 }
10426 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
10427 Value *Arg0 = EmitScalarExpr(E->getArg(0));
10428 Value *Arg1 = EmitScalarExpr(E->getArg(1));
10429 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
10431 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
10432 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
10434 return Builder.CreateCall(F, {Arg0, Arg1});
10435 }
10437 // Memory Operations (MOPS)
10438 if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
10439 Value *Dst = EmitScalarExpr(E->getArg(0));
10440 Value *Val = EmitScalarExpr(E->getArg(1));
10441 Value *Size = EmitScalarExpr(E->getArg(2));
10442 Dst = Builder.CreatePointerCast(Dst, Int8PtrTy);
10443 Val = Builder.CreateTrunc(Val, Int8Ty);
10444 Size = Builder.CreateIntCast(Size, Int64Ty, false);
10445 return Builder.CreateCall(
10446 CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
10447 }
10449 // Memory Tagging Extensions (MTE) Intrinsics
10450 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
10451 switch (BuiltinID) {
10452 case clang::AArch64::BI__builtin_arm_irg:
10453 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
10454 case clang::AArch64::BI__builtin_arm_addg:
10455 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
10456 case clang::AArch64::BI__builtin_arm_gmi:
10457 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
10458 case clang::AArch64::BI__builtin_arm_ldg:
10459 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
10460 case clang::AArch64::BI__builtin_arm_stg:
10461 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
10462 case clang::AArch64::BI__builtin_arm_subp:
10463 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
10464 }
10466 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
10467 llvm::Type *T = ConvertType(E->getType());
10469 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
10470 Value *Pointer = EmitScalarExpr(E->getArg(0));
10471 Value *Mask = EmitScalarExpr(E->getArg(1));
10473 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10474 Mask = Builder.CreateZExt(Mask, Int64Ty);
10475 Value *RV = Builder.CreateCall(
10476 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
10477 return Builder.CreatePointerCast(RV, T);
10478 }
10479 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
10480 Value *Pointer = EmitScalarExpr(E->getArg(0));
10481 Value *TagOffset = EmitScalarExpr(E->getArg(1));
10483 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10484 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
10485 Value *RV = Builder.CreateCall(
10486 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
10487 return Builder.CreatePointerCast(RV, T);
10488 }
10489 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
10490 Value *Pointer = EmitScalarExpr(E->getArg(0));
10491 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
10493 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
10494 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
10495 return Builder.CreateCall(
10496 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
10497 }
10498 // Although it is possible to supply a different return
10499 // address (first arg) to this intrinsic, for now we set the
10500 // return address to be the same as the input address.
10501 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
10502 Value *TagAddress = EmitScalarExpr(E->getArg(0));
10503 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
10504 Value *RV = Builder.CreateCall(
10505 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
10506 return Builder.CreatePointerCast(RV, T);
10507 }
10508 // Although it is possible to supply a different tag (to set)
10509 // to this intrinsic (as first arg), for now we supply
10510 // the tag that is in the input address arg (the common use case).
10511 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
10512 Value *TagAddress = EmitScalarExpr(E->getArg(0));
10513 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
10514 return Builder.CreateCall(
10515 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
10516 }
10517 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
10518 Value *PointerA = EmitScalarExpr(E->getArg(0));
10519 Value *PointerB = EmitScalarExpr(E->getArg(1));
10520 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
10521 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
10522 return Builder.CreateCall(
10523 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
10524 }
10525 }
10527 if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10528 BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
10529 BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10530 BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
10531 BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
10532 BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
10533 BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
10534 BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {
10536 SpecialRegisterAccessKind AccessKind = Write;
10537 if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10538 BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
10539 BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10540 BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
10541 AccessKind = VolatileRead;
10543 bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
10544 BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;
10546 bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
10547 BuiltinID == clang::AArch64::BI__builtin_arm_wsr;
10549 bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
10550 BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;
10552 llvm::Type *ValueType;
10553 llvm::Type *RegisterType = Int64Ty;
10554 if (Is32Bit) {
10555 ValueType = Int32Ty;
10556 } else if (Is128Bit) {
10557 llvm::Type *Int128Ty =
10558 llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
10559 ValueType = Int128Ty;
10560 RegisterType = Int128Ty;
10561 } else if (IsPointerBuiltin) {
10562 ValueType = VoidPtrTy;
10563 } else {
10564 ValueType = Int64Ty;
10565 }
10567 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
10568 AccessKind);
10569 }
10571 if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
10572 BuiltinID == clang::AArch64::BI_WriteStatusReg) {
10573 LLVMContext &Context = CGM.getLLVMContext();
10575 unsigned SysReg =
10576 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
10578 std::string SysRegStr;
10579 llvm::raw_string_ostream(SysRegStr) <<
10580 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
10581 ((SysReg >> 11) & 7) << ":" <<
10582 ((SysReg >> 7) & 15) << ":" <<
10583 ((SysReg >> 3) & 15) << ":" <<
10584 ( SysReg & 7);
10586 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
10587 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10588 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10590 llvm::Type *RegisterType = Int64Ty;
10591 llvm::Type *Types[] = { RegisterType };
10593 if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
10594 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
10596 return Builder.CreateCall(F, Metadata);
10597 }
10599 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
10600 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
10602 return Builder.CreateCall(F, { Metadata, ArgValue });
10603 }
10605 if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
10606 llvm::Function *F =
10607 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
10608 return Builder.CreateCall(F);
10609 }
10611 if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
10612 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
10613 return Builder.CreateCall(F);
10614 }
10616 if (BuiltinID == clang::AArch64::BI__mulh ||
10617 BuiltinID == clang::AArch64::BI__umulh) {
10618 llvm::Type *ResType = ConvertType(E->getType());
10619 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
10621 bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
10622 Value *LHS =
10623 Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
10624 Value *RHS =
10625 Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);
10627 Value *MulResult, *HigherBits;
10628 if (IsSigned) {
10629 MulResult = Builder.CreateNSWMul(LHS, RHS);
10630 HigherBits = Builder.CreateAShr(MulResult, 64);
10631 } else {
10632 MulResult = Builder.CreateNUWMul(LHS, RHS);
10633 HigherBits = Builder.CreateLShr(MulResult, 64);
10634 }
10635 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
10637 return HigherBits;
10638 }
10640 if (BuiltinID == AArch64::BI__writex18byte ||
10641 BuiltinID == AArch64::BI__writex18word ||
10642 BuiltinID == AArch64::BI__writex18dword ||
10643 BuiltinID == AArch64::BI__writex18qword) {
10644 llvm::Type *IntTy = ConvertType(E->getArg(1)->getType());
10646 // Read x18 as i8*
10647 LLVMContext &Context = CGM.getLLVMContext();
10648 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
10649 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10650 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10651 llvm::Function *F =
10652 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10653 llvm::Value *X18 = Builder.CreateCall(F, Metadata);
10654 X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
10656 // Store val at x18 + offset
10657 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
10658 Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
10659 Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
10660 Value *Val = EmitScalarExpr(E->getArg(1));
10661 StoreInst *Store = Builder.CreateAlignedStore(Val, Ptr, CharUnits::One());
10662 return Store;
10663 }
10665 if (BuiltinID == AArch64::BI__readx18byte ||
10666 BuiltinID == AArch64::BI__readx18word ||
10667 BuiltinID == AArch64::BI__readx18dword ||
10668 BuiltinID == AArch64::BI__readx18qword) {
10669 llvm::Type *IntTy = ConvertType(E->getType());
10671 // Read x18 as i8*
10672 LLVMContext &Context = CGM.getLLVMContext();
10673 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
10674 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
10675 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
10676 llvm::Function *F =
10677 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
10678 llvm::Value *X18 = Builder.CreateCall(F, Metadata);
10679 X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
10681 // Load x18 + offset
10682 Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
10683 Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
10684 Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
10685 LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
10686 return Load;
10687 }
10689 // Handle MSVC intrinsics before argument evaluation to prevent double
10690 // evaluation.
10691 if (std::optional<MSVCIntrin> MsvcIntId =
10692 translateAarch64ToMsvcIntrin(BuiltinID))
10693 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
10695 // Some intrinsics are equivalent - if they are use the base intrinsic ID.
10696 auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
10697 return P.first == BuiltinID;
10698 });
10699 if (It != end(NEONEquivalentIntrinsicMap))
10700 BuiltinID = It->second;
10702 // Find out if any arguments are required to be integer constant
10703 // expressions.
10704 unsigned ICEArguments = 0;
10705 ASTContext::GetBuiltinTypeError Error;
10706 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
10707 assert(Error == ASTContext::GE_None && "Should not codegen an error");
10709 llvm::SmallVector<Value*, 4> Ops;
10710 Address PtrOp0 = Address::invalid();
10711 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
10712 if (i == 0) {
10713 switch (BuiltinID) {
10714 case NEON::BI__builtin_neon_vld1_v:
10715 case NEON::BI__builtin_neon_vld1q_v:
10716 case NEON::BI__builtin_neon_vld1_dup_v:
10717 case NEON::BI__builtin_neon_vld1q_dup_v:
10718 case NEON::BI__builtin_neon_vld1_lane_v:
10719 case NEON::BI__builtin_neon_vld1q_lane_v:
10720 case NEON::BI__builtin_neon_vst1_v:
10721 case NEON::BI__builtin_neon_vst1q_v:
10722 case NEON::BI__builtin_neon_vst1_lane_v:
10723 case NEON::BI__builtin_neon_vst1q_lane_v:
10724 case NEON::BI__builtin_neon_vldap1_lane_s64:
10725 case NEON::BI__builtin_neon_vldap1q_lane_s64:
10726 case NEON::BI__builtin_neon_vstl1_lane_s64:
10727 case NEON::BI__builtin_neon_vstl1q_lane_s64:
10728 // Get the alignment for the argument in addition to the value;
10729 // we'll use it later.
10730 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
10731 Ops.push_back(PtrOp0.getPointer());
10732 continue;
10735 if ((ICEArguments & (1 << i)) == 0) {
10736 Ops.push_back(EmitScalarExpr(E->getArg(i)));
10737 } else {
10738 // If this is required to be a constant, constant fold it so that we know
10739 // that the generated intrinsic gets a ConstantInt.
10740 Ops.push_back(llvm::ConstantInt::get(
10741 getLLVMContext(),
10742 *E->getArg(i)->getIntegerConstantExpr(getContext())));
10746 auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
10747 const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
10748 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
10750 if (Builtin) {
10751 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
10752 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
10753 assert(Result && "SISD intrinsic should have been handled");
10754 return Result;
10757 const Expr *Arg = E->getArg(E->getNumArgs()-1);
10758 NeonTypeFlags Type(0);
10759 if (std::optional<llvm::APSInt> Result =
10760 Arg->getIntegerConstantExpr(getContext()))
10761 // Determine the type of this overloaded NEON intrinsic.
10762 Type = NeonTypeFlags(Result->getZExtValue());
10764 bool usgn = Type.isUnsigned();
10765 bool quad = Type.isQuad();
10767 // Handle non-overloaded intrinsics first.
10768 switch (BuiltinID) {
10769 default: break;
10770 case NEON::BI__builtin_neon_vabsh_f16:
10771 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10772 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
10773 case NEON::BI__builtin_neon_vaddq_p128: {
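// Polynomial addition over GF(2) is carry-less, so vaddq_p128(a, b) is just the
// 128-bit XOR a ^ b (e.g. 0b1011 + 0b0110 == 0b1101).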
10774 llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
10775 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10776 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10777 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10778 Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
10779 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
10780 return Builder.CreateBitCast(Ops[0], Int128Ty);
10782 case NEON::BI__builtin_neon_vldrq_p128: {
10783 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
10784 Value *Ptr = EmitScalarExpr(E->getArg(0));
10785 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
10786 CharUnits::fromQuantity(16));
10788 case NEON::BI__builtin_neon_vstrq_p128: {
10789 Value *Ptr = Ops[0];
10790 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
10792 case NEON::BI__builtin_neon_vcvts_f32_u32:
10793 case NEON::BI__builtin_neon_vcvtd_f64_u64:
10794 usgn = true;
10795 [[fallthrough]];
10796 case NEON::BI__builtin_neon_vcvts_f32_s32:
10797 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
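// These scalar conversions are plain [su]itofp casts; roughly,
// vcvts_f32_s32(x) becomes "sitofp i32 %x to float" and
// vcvtd_f64_u64(x) becomes "uitofp i64 %x to double".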
10798 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10799 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
10800 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
10801 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
10802 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
10803 if (usgn)
10804 return Builder.CreateUIToFP(Ops[0], FTy);
10805 return Builder.CreateSIToFP(Ops[0], FTy);
10807 case NEON::BI__builtin_neon_vcvth_f16_u16:
10808 case NEON::BI__builtin_neon_vcvth_f16_u32:
10809 case NEON::BI__builtin_neon_vcvth_f16_u64:
10810 usgn = true;
10811 [[fallthrough]];
10812 case NEON::BI__builtin_neon_vcvth_f16_s16:
10813 case NEON::BI__builtin_neon_vcvth_f16_s32:
10814 case NEON::BI__builtin_neon_vcvth_f16_s64: {
10815 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10816 llvm::Type *FTy = HalfTy;
10817 llvm::Type *InTy;
10818 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
10819 InTy = Int64Ty;
10820 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
10821 InTy = Int32Ty;
10822 else
10823 InTy = Int16Ty;
10824 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
10825 if (usgn)
10826 return Builder.CreateUIToFP(Ops[0], FTy);
10827 return Builder.CreateSIToFP(Ops[0], FTy);
10829 case NEON::BI__builtin_neon_vcvtah_u16_f16:
10830 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
10831 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
10832 case NEON::BI__builtin_neon_vcvtph_u16_f16:
10833 case NEON::BI__builtin_neon_vcvth_u16_f16:
10834 case NEON::BI__builtin_neon_vcvtah_s16_f16:
10835 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
10836 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
10837 case NEON::BI__builtin_neon_vcvtph_s16_f16:
10838 case NEON::BI__builtin_neon_vcvth_s16_f16: {
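// There is no i16-returning form of these intrinsics, so the conversion is done
// at i32 and then truncated; a rough sketch for vcvtah_u16_f16(h):
//   %r32 = call i32 @llvm.aarch64.neon.fcvtau.i32.f16(half %h)
//   %r16 = trunc i32 %r32 to i16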
10839 unsigned Int;
10840 llvm::Type* InTy = Int32Ty;
10841 llvm::Type* FTy = HalfTy;
10842 llvm::Type *Tys[2] = {InTy, FTy};
10843 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10844 switch (BuiltinID) {
10845 default: llvm_unreachable("missing builtin ID in switch!");
10846 case NEON::BI__builtin_neon_vcvtah_u16_f16:
10847 Int = Intrinsic::aarch64_neon_fcvtau; break;
10848 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
10849 Int = Intrinsic::aarch64_neon_fcvtmu; break;
10850 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
10851 Int = Intrinsic::aarch64_neon_fcvtnu; break;
10852 case NEON::BI__builtin_neon_vcvtph_u16_f16:
10853 Int = Intrinsic::aarch64_neon_fcvtpu; break;
10854 case NEON::BI__builtin_neon_vcvth_u16_f16:
10855 Int = Intrinsic::aarch64_neon_fcvtzu; break;
10856 case NEON::BI__builtin_neon_vcvtah_s16_f16:
10857 Int = Intrinsic::aarch64_neon_fcvtas; break;
10858 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
10859 Int = Intrinsic::aarch64_neon_fcvtms; break;
10860 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
10861 Int = Intrinsic::aarch64_neon_fcvtns; break;
10862 case NEON::BI__builtin_neon_vcvtph_s16_f16:
10863 Int = Intrinsic::aarch64_neon_fcvtps; break;
10864 case NEON::BI__builtin_neon_vcvth_s16_f16:
10865 Int = Intrinsic::aarch64_neon_fcvtzs; break;
10867 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
10868 return Builder.CreateTrunc(Ops[0], Int16Ty);
10870 case NEON::BI__builtin_neon_vcaleh_f16:
10871 case NEON::BI__builtin_neon_vcalth_f16:
10872 case NEON::BI__builtin_neon_vcageh_f16:
10873 case NEON::BI__builtin_neon_vcagth_f16: {
10874 unsigned Int;
10875 llvm::Type* InTy = Int32Ty;
10876 llvm::Type* FTy = HalfTy;
10877 llvm::Type *Tys[2] = {InTy, FTy};
10878 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10879 switch (BuiltinID) {
10880 default: llvm_unreachable("missing builtin ID in switch!");
10881 case NEON::BI__builtin_neon_vcageh_f16:
10882 Int = Intrinsic::aarch64_neon_facge; break;
10883 case NEON::BI__builtin_neon_vcagth_f16:
10884 Int = Intrinsic::aarch64_neon_facgt; break;
10885 case NEON::BI__builtin_neon_vcaleh_f16:
10886 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
10887 case NEON::BI__builtin_neon_vcalth_f16:
10888 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
10890 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
10891 return Builder.CreateTrunc(Ops[0], Int16Ty);
10893 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10894 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
10895 unsigned Int;
10896 llvm::Type* InTy = Int32Ty;
10897 llvm::Type* FTy = HalfTy;
10898 llvm::Type *Tys[2] = {InTy, FTy};
10899 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10900 switch (BuiltinID) {
10901 default: llvm_unreachable("missing builtin ID in switch!");
10902 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10903 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
10904 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
10905 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
10907 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10908 return Builder.CreateTrunc(Ops[0], Int16Ty);
10910 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10911 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
10912 unsigned Int;
10913 llvm::Type* FTy = HalfTy;
10914 llvm::Type* InTy = Int32Ty;
10915 llvm::Type *Tys[2] = {FTy, InTy};
10916 Ops.push_back(EmitScalarExpr(E->getArg(1)));
10917 switch (BuiltinID) {
10918 default: llvm_unreachable("missing builtin ID in switch!");
10919 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10920 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
10921 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
10922 break;
10923 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
10924 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
10925 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
10926 break;
10928 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10930 case NEON::BI__builtin_neon_vpaddd_s64: {
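// Scalar pairwise add: vpaddd_s64(v) is simply v[0] + v[1] on the two i64 lanes.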
10931 auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
10932 Value *Vec = EmitScalarExpr(E->getArg(0));
10934 // The vector is v2i64, so make sure it's bitcast to that.
10934 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
10935 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10936 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10937 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10938 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10939 // Pairwise addition of a v2i64 into a scalar i64.
10940 return Builder.CreateAdd(Op0, Op1, "vpaddd");
10942 case NEON::BI__builtin_neon_vpaddd_f64: {
10943 auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
10944 Value *Vec = EmitScalarExpr(E->getArg(0));
10945 // The vector is v2f64, so make sure it's bitcast to that.
10946 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
10947 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10948 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10949 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10950 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10951 // Pairwise addition of a v2f64 into a scalar f64.
10952 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
10954 case NEON::BI__builtin_neon_vpadds_f32: {
10955 auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
10956 Value *Vec = EmitScalarExpr(E->getArg(0));
10957 // The vector is v2f32, so make sure it's bitcast to that.
10958 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
10959 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10960 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10961 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10962 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10963 // Pairwise addition of a v2f32 into a scalar f32.
10964 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
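// The scalar compare-against-zero builtins below all share one shape: compare
// the operand against zero and sign-extend the i1 result, so they return an
// all-ones mask when the predicate holds and 0 otherwise. For example,
// vcgezd_s64(-5) yields 0 and vcgezd_s64(7) yields 0xFFFFFFFFFFFFFFFF.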
10966 case NEON::BI__builtin_neon_vceqzd_s64:
10967 case NEON::BI__builtin_neon_vceqzd_f64:
10968 case NEON::BI__builtin_neon_vceqzs_f32:
10969 case NEON::BI__builtin_neon_vceqzh_f16:
10970 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10971 return EmitAArch64CompareBuiltinExpr(
10972 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10973 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
10974 case NEON::BI__builtin_neon_vcgezd_s64:
10975 case NEON::BI__builtin_neon_vcgezd_f64:
10976 case NEON::BI__builtin_neon_vcgezs_f32:
10977 case NEON::BI__builtin_neon_vcgezh_f16:
10978 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10979 return EmitAArch64CompareBuiltinExpr(
10980 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10981 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
10982 case NEON::BI__builtin_neon_vclezd_s64:
10983 case NEON::BI__builtin_neon_vclezd_f64:
10984 case NEON::BI__builtin_neon_vclezs_f32:
10985 case NEON::BI__builtin_neon_vclezh_f16:
10986 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10987 return EmitAArch64CompareBuiltinExpr(
10988 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10989 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
10990 case NEON::BI__builtin_neon_vcgtzd_s64:
10991 case NEON::BI__builtin_neon_vcgtzd_f64:
10992 case NEON::BI__builtin_neon_vcgtzs_f32:
10993 case NEON::BI__builtin_neon_vcgtzh_f16:
10994 Ops.push_back(EmitScalarExpr(E->getArg(0)));
10995 return EmitAArch64CompareBuiltinExpr(
10996 Ops[0], ConvertType(E->getCallReturnType(getContext())),
10997 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
10998 case NEON::BI__builtin_neon_vcltzd_s64:
10999 case NEON::BI__builtin_neon_vcltzd_f64:
11000 case NEON::BI__builtin_neon_vcltzs_f32:
11001 case NEON::BI__builtin_neon_vcltzh_f16:
11002 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11003 return EmitAArch64CompareBuiltinExpr(
11004 Ops[0], ConvertType(E->getCallReturnType(getContext())),
11005 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
11007 case NEON::BI__builtin_neon_vceqzd_u64: {
11008 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11009 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11010 Ops[0] =
11011 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
11012 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
11014 case NEON::BI__builtin_neon_vceqd_f64:
11015 case NEON::BI__builtin_neon_vcled_f64:
11016 case NEON::BI__builtin_neon_vcltd_f64:
11017 case NEON::BI__builtin_neon_vcged_f64:
11018 case NEON::BI__builtin_neon_vcgtd_f64: {
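// Equality uses the quiet compare, while the ordered inequalities go through
// CreateFCmpS so that, under strict FP, a quiet NaN operand raises Invalid.
// In the default environment vcged_f64(a, b) is roughly
//   %c = fcmp oge double %a, %b
//   %r = sext i1 %c to i64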
11019 llvm::CmpInst::Predicate P;
11020 switch (BuiltinID) {
11021 default: llvm_unreachable("missing builtin ID in switch!");
11022 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
11023 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
11024 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
11025 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
11026 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
11028 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11029 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11030 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
11031 if (P == llvm::FCmpInst::FCMP_OEQ)
11032 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11033 else
11034 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11035 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
11037 case NEON::BI__builtin_neon_vceqs_f32:
11038 case NEON::BI__builtin_neon_vcles_f32:
11039 case NEON::BI__builtin_neon_vclts_f32:
11040 case NEON::BI__builtin_neon_vcges_f32:
11041 case NEON::BI__builtin_neon_vcgts_f32: {
11042 llvm::CmpInst::Predicate P;
11043 switch (BuiltinID) {
11044 default: llvm_unreachable("missing builtin ID in switch!");
11045 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
11046 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
11047 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
11048 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
11049 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
11051 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11052 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
11053 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
11054 if (P == llvm::FCmpInst::FCMP_OEQ)
11055 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11056 else
11057 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11058 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
11060 case NEON::BI__builtin_neon_vceqh_f16:
11061 case NEON::BI__builtin_neon_vcleh_f16:
11062 case NEON::BI__builtin_neon_vclth_f16:
11063 case NEON::BI__builtin_neon_vcgeh_f16:
11064 case NEON::BI__builtin_neon_vcgth_f16: {
11065 llvm::CmpInst::Predicate P;
11066 switch (BuiltinID) {
11067 default: llvm_unreachable("missing builtin ID in switch!");
11068 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
11069 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
11070 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
11071 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
11072 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
11074 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11075 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
11076 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
11077 if (P == llvm::FCmpInst::FCMP_OEQ)
11078 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
11079 else
11080 Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
11081 return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
11083 case NEON::BI__builtin_neon_vceqd_s64:
11084 case NEON::BI__builtin_neon_vceqd_u64:
11085 case NEON::BI__builtin_neon_vcgtd_s64:
11086 case NEON::BI__builtin_neon_vcgtd_u64:
11087 case NEON::BI__builtin_neon_vcltd_s64:
11088 case NEON::BI__builtin_neon_vcltd_u64:
11089 case NEON::BI__builtin_neon_vcged_u64:
11090 case NEON::BI__builtin_neon_vcged_s64:
11091 case NEON::BI__builtin_neon_vcled_u64:
11092 case NEON::BI__builtin_neon_vcled_s64: {
11093 llvm::CmpInst::Predicate P;
11094 switch (BuiltinID) {
11095 default: llvm_unreachable("missing builtin ID in switch!");
11096 case NEON::BI__builtin_neon_vceqd_s64:
11097 case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
11098 case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
11099 case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
11100 case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
11101 case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
11102 case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
11103 case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
11104 case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
11105 case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
11107 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11108 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11109 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11110 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
11111 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
11113 case NEON::BI__builtin_neon_vtstd_s64:
11114 case NEON::BI__builtin_neon_vtstd_u64: {
11115 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11116 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
11117 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11118 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
11119 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
11120 llvm::Constant::getNullValue(Int64Ty));
11121 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
11123 case NEON::BI__builtin_neon_vset_lane_i8:
11124 case NEON::BI__builtin_neon_vset_lane_i16:
11125 case NEON::BI__builtin_neon_vset_lane_i32:
11126 case NEON::BI__builtin_neon_vset_lane_i64:
11127 case NEON::BI__builtin_neon_vset_lane_bf16:
11128 case NEON::BI__builtin_neon_vset_lane_f32:
11129 case NEON::BI__builtin_neon_vsetq_lane_i8:
11130 case NEON::BI__builtin_neon_vsetq_lane_i16:
11131 case NEON::BI__builtin_neon_vsetq_lane_i32:
11132 case NEON::BI__builtin_neon_vsetq_lane_i64:
11133 case NEON::BI__builtin_neon_vsetq_lane_bf16:
11134 case NEON::BI__builtin_neon_vsetq_lane_f32:
11135 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11136 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11137 case NEON::BI__builtin_neon_vset_lane_f64:
11138 // The vector type needs a cast for the v1f64 variant.
11139 Ops[1] =
11140 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
11141 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11142 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11143 case NEON::BI__builtin_neon_vsetq_lane_f64:
11144 // The vector type needs a cast for the v2f64 variant.
11145 Ops[1] =
11146 Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
11147 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11148 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
11150 case NEON::BI__builtin_neon_vget_lane_i8:
11151 case NEON::BI__builtin_neon_vdupb_lane_i8:
11152 Ops[0] =
11153 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
11154 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11155 "vget_lane");
11156 case NEON::BI__builtin_neon_vgetq_lane_i8:
11157 case NEON::BI__builtin_neon_vdupb_laneq_i8:
11158 Ops[0] =
11159 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
11160 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11161 "vgetq_lane");
11162 case NEON::BI__builtin_neon_vget_lane_i16:
11163 case NEON::BI__builtin_neon_vduph_lane_i16:
11164 Ops[0] =
11165 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
11166 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11167 "vget_lane");
11168 case NEON::BI__builtin_neon_vgetq_lane_i16:
11169 case NEON::BI__builtin_neon_vduph_laneq_i16:
11170 Ops[0] =
11171 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
11172 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11173 "vgetq_lane");
11174 case NEON::BI__builtin_neon_vget_lane_i32:
11175 case NEON::BI__builtin_neon_vdups_lane_i32:
11176 Ops[0] =
11177 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
11178 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11179 "vget_lane");
11180 case NEON::BI__builtin_neon_vdups_lane_f32:
11181 Ops[0] =
11182 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
11183 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11184 "vdups_lane");
11185 case NEON::BI__builtin_neon_vgetq_lane_i32:
11186 case NEON::BI__builtin_neon_vdups_laneq_i32:
11187 Ops[0] =
11188 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
11189 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11190 "vgetq_lane");
11191 case NEON::BI__builtin_neon_vget_lane_i64:
11192 case NEON::BI__builtin_neon_vdupd_lane_i64:
11193 Ops[0] =
11194 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
11195 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11196 "vget_lane");
11197 case NEON::BI__builtin_neon_vdupd_lane_f64:
11198 Ops[0] =
11199 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
11200 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11201 "vdupd_lane");
11202 case NEON::BI__builtin_neon_vgetq_lane_i64:
11203 case NEON::BI__builtin_neon_vdupd_laneq_i64:
11204 Ops[0] =
11205 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
11206 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11207 "vgetq_lane");
11208 case NEON::BI__builtin_neon_vget_lane_f32:
11209 Ops[0] =
11210 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
11211 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11212 "vget_lane");
11213 case NEON::BI__builtin_neon_vget_lane_f64:
11214 Ops[0] =
11215 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
11216 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11217 "vget_lane");
11218 case NEON::BI__builtin_neon_vgetq_lane_f32:
11219 case NEON::BI__builtin_neon_vdups_laneq_f32:
11220 Ops[0] =
11221 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
11222 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11223 "vgetq_lane");
11224 case NEON::BI__builtin_neon_vgetq_lane_f64:
11225 case NEON::BI__builtin_neon_vdupd_laneq_f64:
11226 Ops[0] =
11227 Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
11228 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11229 "vgetq_lane");
11230 case NEON::BI__builtin_neon_vaddh_f16:
11231 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11232 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
11233 case NEON::BI__builtin_neon_vsubh_f16:
11234 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11235 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
11236 case NEON::BI__builtin_neon_vmulh_f16:
11237 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11238 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
11239 case NEON::BI__builtin_neon_vdivh_f16:
11240 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11241 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
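// Note the operand order for the fused multiply-adds: the NEON builtins take
// the accumulator first while llvm.fma takes it last, so roughly
// vfmah_f16(a, b, c) -> call half @llvm.fma.f16(half %b, half %c, half %a).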
11242 case NEON::BI__builtin_neon_vfmah_f16:
11243 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11244 return emitCallMaybeConstrainedFPBuiltin(
11245 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
11246 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
11247 case NEON::BI__builtin_neon_vfmsh_f16: {
11248 Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
11250 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
11251 return emitCallMaybeConstrainedFPBuiltin(
11252 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
11253 {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
11255 case NEON::BI__builtin_neon_vaddd_s64:
11256 case NEON::BI__builtin_neon_vaddd_u64:
11257 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
11258 case NEON::BI__builtin_neon_vsubd_s64:
11259 case NEON::BI__builtin_neon_vsubd_u64:
11260 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
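// The scalar saturating doubling multiply-accumulate forms have no i16 scalar
// intrinsic, so they are emulated (a sketch of the intent, not literal IR):
// place the i16 operands in lane 0 of a v4i16, sqdmull them to a v4i32,
// extract lane 0, then sqadd/sqsub that i32 into the accumulator.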
11261 case NEON::BI__builtin_neon_vqdmlalh_s16:
11262 case NEON::BI__builtin_neon_vqdmlslh_s16: {
11263 SmallVector<Value *, 2> ProductOps;
11264 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
11265 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
11266 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
11267 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
11268 ProductOps, "vqdmlXl");
11269 Constant *CI = ConstantInt::get(SizeTy, 0);
11270 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
11272 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
11273 ? Intrinsic::aarch64_neon_sqadd
11274 : Intrinsic::aarch64_neon_sqsub;
11275 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
11277 case NEON::BI__builtin_neon_vqshlud_n_s64: {
11278 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11279 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
11280 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
11281 Ops, "vqshlu_n");
11283 case NEON::BI__builtin_neon_vqshld_n_u64:
11284 case NEON::BI__builtin_neon_vqshld_n_s64: {
11285 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
11286 ? Intrinsic::aarch64_neon_uqshl
11287 : Intrinsic::aarch64_neon_sqshl;
11288 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11289 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
11290 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
11292 case NEON::BI__builtin_neon_vrshrd_n_u64:
11293 case NEON::BI__builtin_neon_vrshrd_n_s64: {
11294 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
11295 ? Intrinsic::aarch64_neon_urshl
11296 : Intrinsic::aarch64_neon_srshl;
11297 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11298 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
11299 Ops[1] = ConstantInt::get(Int64Ty, -SV);
11300 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
11302 case NEON::BI__builtin_neon_vrsrad_n_u64:
11303 case NEON::BI__builtin_neon_vrsrad_n_s64: {
11304 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
11305 ? Intrinsic::aarch64_neon_urshl
11306 : Intrinsic::aarch64_neon_srshl;
11307 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
11308 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
11309 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
11310 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
11311 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
11313 case NEON::BI__builtin_neon_vshld_n_s64:
11314 case NEON::BI__builtin_neon_vshld_n_u64: {
11315 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11316 return Builder.CreateShl(
11317 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
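// AArch64 defines a signed scalar shift right by 64 as replicating the sign
// bit, hence the clamp below: e.g. vshrd_n_s64(x, 64) == x >> 63 (arithmetic),
// whereas the unsigned variant shifted by 64 simply yields 0.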
11319 case NEON::BI__builtin_neon_vshrd_n_s64: {
11320 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11321 return Builder.CreateAShr(
11322 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
11323 Amt->getZExtValue())),
11324 "shrd_n");
11326 case NEON::BI__builtin_neon_vshrd_n_u64: {
11327 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11328 uint64_t ShiftAmt = Amt->getZExtValue();
11329 // Right-shifting an unsigned value by its size yields 0.
11330 if (ShiftAmt == 64)
11331 return ConstantInt::get(Int64Ty, 0);
11332 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
11333 "shrd_n");
11335 case NEON::BI__builtin_neon_vsrad_n_s64: {
11336 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
11337 Ops[1] = Builder.CreateAShr(
11338 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
11339 Amt->getZExtValue())),
11340 "shrd_n");
11341 return Builder.CreateAdd(Ops[0], Ops[1]);
11343 case NEON::BI__builtin_neon_vsrad_n_u64: {
11344 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
11345 uint64_t ShiftAmt = Amt->getZExtValue();
11346 // Right-shifting an unsigned value by its size yields 0.
11347 // As Op + 0 = Op, return Ops[0] directly.
11348 if (ShiftAmt == 64)
11349 return Ops[0];
11350 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
11351 "shrd_n");
11352 return Builder.CreateAdd(Ops[0], Ops[1]);
11354 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
11355 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
11356 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
11357 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
11358 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
11359 "lane");
11360 SmallVector<Value *, 2> ProductOps;
11361 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
11362 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
11363 auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
11364 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
11365 ProductOps, "vqdmlXl");
11366 Constant *CI = ConstantInt::get(SizeTy, 0);
11367 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
11368 Ops.pop_back();
11370 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
11371 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
11372 ? Intrinsic::aarch64_neon_sqadd
11373 : Intrinsic::aarch64_neon_sqsub;
11374 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
11376 case NEON::BI__builtin_neon_vqdmlals_s32:
11377 case NEON::BI__builtin_neon_vqdmlsls_s32: {
11378 SmallVector<Value *, 2> ProductOps;
11379 ProductOps.push_back(Ops[1]);
11380 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
11381 Ops[1] =
11382 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
11383 ProductOps, "vqdmlXl");
11385 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
11386 ? Intrinsic::aarch64_neon_sqadd
11387 : Intrinsic::aarch64_neon_sqsub;
11388 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
11390 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
11391 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
11392 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
11393 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
11394 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
11395 "lane");
11396 SmallVector<Value *, 2> ProductOps;
11397 ProductOps.push_back(Ops[1]);
11398 ProductOps.push_back(Ops[2]);
11399 Ops[1] =
11400 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
11401 ProductOps, "vqdmlXl");
11402 Ops.pop_back();
11404 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
11405 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
11406 ? Intrinsic::aarch64_neon_sqadd
11407 : Intrinsic::aarch64_neon_sqsub;
11408 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
11410 case NEON::BI__builtin_neon_vget_lane_bf16:
11411 case NEON::BI__builtin_neon_vduph_lane_bf16:
11412 case NEON::BI__builtin_neon_vduph_lane_f16: {
11413 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11414 "vget_lane");
11416 case NEON::BI__builtin_neon_vgetq_lane_bf16:
11417 case NEON::BI__builtin_neon_vduph_laneq_bf16:
11418 case NEON::BI__builtin_neon_vduph_laneq_f16: {
11419 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
11420 "vgetq_lane");
11423 case clang::AArch64::BI_InterlockedAdd: {
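// _InterlockedAdd returns the *new* value, but atomicrmw add returns the old
// one, so the addend is added back: e.g. with *p == 3, _InterlockedAdd(p, 4)
// performs "atomicrmw add ptr %p, i32 4 seq_cst" (yielding 3) and returns 3 + 4 == 7.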
11424 Value *Arg0 = EmitScalarExpr(E->getArg(0));
11425 Value *Arg1 = EmitScalarExpr(E->getArg(1));
11426 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
11427 AtomicRMWInst::Add, Arg0, Arg1,
11428 llvm::AtomicOrdering::SequentiallyConsistent);
11429 return Builder.CreateAdd(RMWI, Arg1);
11433 llvm::FixedVectorType *VTy = GetNeonType(this, Type);
11434 llvm::Type *Ty = VTy;
11435 if (!Ty)
11436 return nullptr;
11438 // Not all intrinsics handled by the common case work for AArch64 yet, so only
11439 // defer to common code if it's been added to our special map.
11440 Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
11441 AArch64SIMDIntrinsicsProvenSorted);
11443 if (Builtin)
11444 return EmitCommonNeonBuiltinExpr(
11445 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
11446 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
11447 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
11449 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
11450 return V;
11452 unsigned Int;
11453 switch (BuiltinID) {
11454 default: return nullptr;
11455 case NEON::BI__builtin_neon_vbsl_v:
11456 case NEON::BI__builtin_neon_vbslq_v: {
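// Bitwise select: each result bit comes from operand 1 where the mask bit is
// set and from operand 2 otherwise, i.e. vbsl(m, a, b) == (m & a) | (~m & b),
// computed on the integer view of the vectors.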
11457 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
11458 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
11459 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
11460 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
11462 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
11463 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
11464 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
11465 return Builder.CreateBitCast(Ops[0], Ty);
11467 case NEON::BI__builtin_neon_vfma_lane_v:
11468 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
11469 // The ARM builtins (and instructions) have the addend as the first
11470 // operand, but the 'fma' intrinsics have it last. Swap it around here.
11471 Value *Addend = Ops[0];
11472 Value *Multiplicand = Ops[1];
11473 Value *LaneSource = Ops[2];
11474 Ops[0] = Multiplicand;
11475 Ops[1] = LaneSource;
11476 Ops[2] = Addend;
11478 // Now adjust things to handle the lane access.
11479 auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
11480 ? llvm::FixedVectorType::get(VTy->getElementType(),
11481 VTy->getNumElements() / 2)
11482 : VTy;
11483 llvm::Constant *cst = cast<Constant>(Ops[3]);
11484 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
11485 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
11486 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
11488 Ops.pop_back();
11489 Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
11490 : Intrinsic::fma;
11491 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
11493 case NEON::BI__builtin_neon_vfma_laneq_v: {
11494 auto *VTy = cast<llvm::FixedVectorType>(Ty);
11495 // v1f64 fma should be mapped to Neon scalar f64 fma
11496 if (VTy && VTy->getElementType() == DoubleTy) {
11497 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11498 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
11499 llvm::FixedVectorType *VTy =
11500 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
11501 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
11502 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
11503 Value *Result;
11504 Result = emitCallMaybeConstrainedFPBuiltin(
11505 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
11506 DoubleTy, {Ops[1], Ops[2], Ops[0]});
11507 return Builder.CreateBitCast(Result, Ty);
11509 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11510 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11512 auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
11513 VTy->getNumElements() * 2);
11514 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
11515 Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
11516 cast<ConstantInt>(Ops[3]));
11517 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
11519 return emitCallMaybeConstrainedFPBuiltin(
11520 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11521 {Ops[2], Ops[1], Ops[0]});
11523 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
11524 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11525 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11527 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11528 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
11529 return emitCallMaybeConstrainedFPBuiltin(
11530 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11531 {Ops[2], Ops[1], Ops[0]});
11533 case NEON::BI__builtin_neon_vfmah_lane_f16:
11534 case NEON::BI__builtin_neon_vfmas_lane_f32:
11535 case NEON::BI__builtin_neon_vfmah_laneq_f16:
11536 case NEON::BI__builtin_neon_vfmas_laneq_f32:
11537 case NEON::BI__builtin_neon_vfmad_lane_f64:
11538 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
11539 Ops.push_back(EmitScalarExpr(E->getArg(3)));
11540 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
11541 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
11542 return emitCallMaybeConstrainedFPBuiltin(
11543 *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
11544 {Ops[1], Ops[2], Ops[0]});
11546 case NEON::BI__builtin_neon_vmull_v:
11547 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11548 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
11549 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
11550 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
11551 case NEON::BI__builtin_neon_vmax_v:
11552 case NEON::BI__builtin_neon_vmaxq_v:
11553 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11554 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
11555 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
11556 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
11557 case NEON::BI__builtin_neon_vmaxh_f16: {
11558 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11559 Int = Intrinsic::aarch64_neon_fmax;
11560 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
11562 case NEON::BI__builtin_neon_vmin_v:
11563 case NEON::BI__builtin_neon_vminq_v:
11564 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11565 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
11566 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
11567 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
11568 case NEON::BI__builtin_neon_vminh_f16: {
11569 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11570 Int = Intrinsic::aarch64_neon_fmin;
11571 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
11573 case NEON::BI__builtin_neon_vabd_v:
11574 case NEON::BI__builtin_neon_vabdq_v:
11575 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11576 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
11577 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
11578 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
11579 case NEON::BI__builtin_neon_vpadal_v:
11580 case NEON::BI__builtin_neon_vpadalq_v: {
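// There is no single "pairwise add and accumulate" intrinsic used here, so the
// operation is split: a [su]addlp pairwise widening add of the second operand,
// then an ordinary vector add into the accumulator; roughly,
// vpadal_s8(acc, v) -> add(acc, saddlp(v)).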
11581 unsigned ArgElts = VTy->getNumElements();
11582 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
11583 unsigned BitWidth = EltTy->getBitWidth();
11584 auto *ArgTy = llvm::FixedVectorType::get(
11585 llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
11586 llvm::Type* Tys[2] = { VTy, ArgTy };
11587 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
11588 SmallVector<llvm::Value*, 1> TmpOps;
11589 TmpOps.push_back(Ops[1]);
11590 Function *F = CGM.getIntrinsic(Int, Tys);
11591 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
11592 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
11593 return Builder.CreateAdd(tmp, addend);
11595 case NEON::BI__builtin_neon_vpmin_v:
11596 case NEON::BI__builtin_neon_vpminq_v:
11597 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11598 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
11599 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
11600 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
11601 case NEON::BI__builtin_neon_vpmax_v:
11602 case NEON::BI__builtin_neon_vpmaxq_v:
11603 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
11604 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
11605 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
11606 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
11607 case NEON::BI__builtin_neon_vminnm_v:
11608 case NEON::BI__builtin_neon_vminnmq_v:
11609 Int = Intrinsic::aarch64_neon_fminnm;
11610 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
11611 case NEON::BI__builtin_neon_vminnmh_f16:
11612 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11613 Int = Intrinsic::aarch64_neon_fminnm;
11614 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
11615 case NEON::BI__builtin_neon_vmaxnm_v:
11616 case NEON::BI__builtin_neon_vmaxnmq_v:
11617 Int = Intrinsic::aarch64_neon_fmaxnm;
11618 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
11619 case NEON::BI__builtin_neon_vmaxnmh_f16:
11620 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11621 Int = Intrinsic::aarch64_neon_fmaxnm;
11622 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
11623 case NEON::BI__builtin_neon_vrecpss_f32: {
11624 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11625 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
11626 Ops, "vrecps");
11628 case NEON::BI__builtin_neon_vrecpsd_f64:
11629 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11630 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
11631 Ops, "vrecps");
11632 case NEON::BI__builtin_neon_vrecpsh_f16:
11633 Ops.push_back(EmitScalarExpr(E->getArg(1)));
11634 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
11635 Ops, "vrecps");
11636 case NEON::BI__builtin_neon_vqshrun_n_v:
11637 Int = Intrinsic::aarch64_neon_sqshrun;
11638 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
11639 case NEON::BI__builtin_neon_vqrshrun_n_v:
11640 Int = Intrinsic::aarch64_neon_sqrshrun;
11641 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
11642 case NEON::BI__builtin_neon_vqshrn_n_v:
11643 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
11644 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
11645 case NEON::BI__builtin_neon_vrshrn_n_v:
11646 Int = Intrinsic::aarch64_neon_rshrn;
11647 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
11648 case NEON::BI__builtin_neon_vqrshrn_n_v:
11649 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
11650 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
11651 case NEON::BI__builtin_neon_vrndah_f16: {
11652 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11653 Int = Builder.getIsFPConstrained()
11654 ? Intrinsic::experimental_constrained_round
11655 : Intrinsic::round;
11656 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
11658 case NEON::BI__builtin_neon_vrnda_v:
11659 case NEON::BI__builtin_neon_vrndaq_v: {
11660 Int = Builder.getIsFPConstrained()
11661 ? Intrinsic::experimental_constrained_round
11662 : Intrinsic::round;
11663 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
11665 case NEON::BI__builtin_neon_vrndih_f16: {
11666 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11667 Int = Builder.getIsFPConstrained()
11668 ? Intrinsic::experimental_constrained_nearbyint
11669 : Intrinsic::nearbyint;
11670 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
11672 case NEON::BI__builtin_neon_vrndmh_f16: {
11673 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11674 Int = Builder.getIsFPConstrained()
11675 ? Intrinsic::experimental_constrained_floor
11676 : Intrinsic::floor;
11677 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
11679 case NEON::BI__builtin_neon_vrndm_v:
11680 case NEON::BI__builtin_neon_vrndmq_v: {
11681 Int = Builder.getIsFPConstrained()
11682 ? Intrinsic::experimental_constrained_floor
11683 : Intrinsic::floor;
11684 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
11686 case NEON::BI__builtin_neon_vrndnh_f16: {
11687 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11688 Int = Builder.getIsFPConstrained()
11689 ? Intrinsic::experimental_constrained_roundeven
11690 : Intrinsic::roundeven;
11691 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
11693 case NEON::BI__builtin_neon_vrndn_v:
11694 case NEON::BI__builtin_neon_vrndnq_v: {
11695 Int = Builder.getIsFPConstrained()
11696 ? Intrinsic::experimental_constrained_roundeven
11697 : Intrinsic::roundeven;
11698 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
11700 case NEON::BI__builtin_neon_vrndns_f32: {
11701 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11702 Int = Builder.getIsFPConstrained()
11703 ? Intrinsic::experimental_constrained_roundeven
11704 : Intrinsic::roundeven;
11705 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
11707 case NEON::BI__builtin_neon_vrndph_f16: {
11708 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11709 Int = Builder.getIsFPConstrained()
11710 ? Intrinsic::experimental_constrained_ceil
11711 : Intrinsic::ceil;
11712 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
11714 case NEON::BI__builtin_neon_vrndp_v:
11715 case NEON::BI__builtin_neon_vrndpq_v: {
11716 Int = Builder.getIsFPConstrained()
11717 ? Intrinsic::experimental_constrained_ceil
11718 : Intrinsic::ceil;
11719 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
11721 case NEON::BI__builtin_neon_vrndxh_f16: {
11722 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11723 Int = Builder.getIsFPConstrained()
11724 ? Intrinsic::experimental_constrained_rint
11725 : Intrinsic::rint;
11726 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
11728 case NEON::BI__builtin_neon_vrndx_v:
11729 case NEON::BI__builtin_neon_vrndxq_v: {
11730 Int = Builder.getIsFPConstrained()
11731 ? Intrinsic::experimental_constrained_rint
11732 : Intrinsic::rint;
11733 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
11735 case NEON::BI__builtin_neon_vrndh_f16: {
11736 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11737 Int = Builder.getIsFPConstrained()
11738 ? Intrinsic::experimental_constrained_trunc
11739 : Intrinsic::trunc;
11740 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
11742 case NEON::BI__builtin_neon_vrnd32x_f32:
11743 case NEON::BI__builtin_neon_vrnd32xq_f32: {
11744 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11745 Int = Intrinsic::aarch64_neon_frint32x;
11746 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
11748 case NEON::BI__builtin_neon_vrnd32z_f32:
11749 case NEON::BI__builtin_neon_vrnd32zq_f32: {
11750 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11751 Int = Intrinsic::aarch64_neon_frint32z;
11752 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
11754 case NEON::BI__builtin_neon_vrnd64x_f32:
11755 case NEON::BI__builtin_neon_vrnd64xq_f32: {
11756 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11757 Int = Intrinsic::aarch64_neon_frint64x;
11758 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
11760 case NEON::BI__builtin_neon_vrnd64z_f32:
11761 case NEON::BI__builtin_neon_vrnd64zq_f32: {
11762 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11763 Int = Intrinsic::aarch64_neon_frint64z;
11764 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
11766 case NEON::BI__builtin_neon_vrnd_v:
11767 case NEON::BI__builtin_neon_vrndq_v: {
11768 Int = Builder.getIsFPConstrained()
11769 ? Intrinsic::experimental_constrained_trunc
11770 : Intrinsic::trunc;
11771 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
11773 case NEON::BI__builtin_neon_vcvt_f64_v:
11774 case NEON::BI__builtin_neon_vcvtq_f64_v:
11775 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11776 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
11777 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
11778 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
11779 case NEON::BI__builtin_neon_vcvt_f64_f32: {
11780 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
11781 "unexpected vcvt_f64_f32 builtin");
11782 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
11783 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
11785 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
11787 case NEON::BI__builtin_neon_vcvt_f32_f64: {
11788 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
11789 "unexpected vcvt_f32_f64 builtin");
11790 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
11791 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
11793 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
11795 case NEON::BI__builtin_neon_vcvt_s32_v:
11796 case NEON::BI__builtin_neon_vcvt_u32_v:
11797 case NEON::BI__builtin_neon_vcvt_s64_v:
11798 case NEON::BI__builtin_neon_vcvt_u64_v:
11799 case NEON::BI__builtin_neon_vcvt_s16_f16:
11800 case NEON::BI__builtin_neon_vcvt_u16_f16:
11801 case NEON::BI__builtin_neon_vcvtq_s32_v:
11802 case NEON::BI__builtin_neon_vcvtq_u32_v:
11803 case NEON::BI__builtin_neon_vcvtq_s64_v:
11804 case NEON::BI__builtin_neon_vcvtq_u64_v:
11805 case NEON::BI__builtin_neon_vcvtq_s16_f16:
11806 case NEON::BI__builtin_neon_vcvtq_u16_f16: {
11807 Int =
11808 usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
11809 llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
11810 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
11812 case NEON::BI__builtin_neon_vcvta_s16_f16:
11813 case NEON::BI__builtin_neon_vcvta_u16_f16:
11814 case NEON::BI__builtin_neon_vcvta_s32_v:
11815 case NEON::BI__builtin_neon_vcvtaq_s16_f16:
11816 case NEON::BI__builtin_neon_vcvtaq_s32_v:
11817 case NEON::BI__builtin_neon_vcvta_u32_v:
11818 case NEON::BI__builtin_neon_vcvtaq_u16_f16:
11819 case NEON::BI__builtin_neon_vcvtaq_u32_v:
11820 case NEON::BI__builtin_neon_vcvta_s64_v:
11821 case NEON::BI__builtin_neon_vcvtaq_s64_v:
11822 case NEON::BI__builtin_neon_vcvta_u64_v:
11823 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
11824 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
11825 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11826 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
11828 case NEON::BI__builtin_neon_vcvtm_s16_f16:
11829 case NEON::BI__builtin_neon_vcvtm_s32_v:
11830 case NEON::BI__builtin_neon_vcvtmq_s16_f16:
11831 case NEON::BI__builtin_neon_vcvtmq_s32_v:
11832 case NEON::BI__builtin_neon_vcvtm_u16_f16:
11833 case NEON::BI__builtin_neon_vcvtm_u32_v:
11834 case NEON::BI__builtin_neon_vcvtmq_u16_f16:
11835 case NEON::BI__builtin_neon_vcvtmq_u32_v:
11836 case NEON::BI__builtin_neon_vcvtm_s64_v:
11837 case NEON::BI__builtin_neon_vcvtmq_s64_v:
11838 case NEON::BI__builtin_neon_vcvtm_u64_v:
11839 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
11840 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
11841 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11842 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
11844 case NEON::BI__builtin_neon_vcvtn_s16_f16:
11845 case NEON::BI__builtin_neon_vcvtn_s32_v:
11846 case NEON::BI__builtin_neon_vcvtnq_s16_f16:
11847 case NEON::BI__builtin_neon_vcvtnq_s32_v:
11848 case NEON::BI__builtin_neon_vcvtn_u16_f16:
11849 case NEON::BI__builtin_neon_vcvtn_u32_v:
11850 case NEON::BI__builtin_neon_vcvtnq_u16_f16:
11851 case NEON::BI__builtin_neon_vcvtnq_u32_v:
11852 case NEON::BI__builtin_neon_vcvtn_s64_v:
11853 case NEON::BI__builtin_neon_vcvtnq_s64_v:
11854 case NEON::BI__builtin_neon_vcvtn_u64_v:
11855 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
11856 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
11857 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11858 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
11860 case NEON::BI__builtin_neon_vcvtp_s16_f16:
11861 case NEON::BI__builtin_neon_vcvtp_s32_v:
11862 case NEON::BI__builtin_neon_vcvtpq_s16_f16:
11863 case NEON::BI__builtin_neon_vcvtpq_s32_v:
11864 case NEON::BI__builtin_neon_vcvtp_u16_f16:
11865 case NEON::BI__builtin_neon_vcvtp_u32_v:
11866 case NEON::BI__builtin_neon_vcvtpq_u16_f16:
11867 case NEON::BI__builtin_neon_vcvtpq_u32_v:
11868 case NEON::BI__builtin_neon_vcvtp_s64_v:
11869 case NEON::BI__builtin_neon_vcvtpq_s64_v:
11870 case NEON::BI__builtin_neon_vcvtp_u64_v:
11871 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
11872 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
11873 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11874 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
11876 case NEON::BI__builtin_neon_vmulx_v:
11877 case NEON::BI__builtin_neon_vmulxq_v: {
11878 Int = Intrinsic::aarch64_neon_fmulx;
11879 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
11881 case NEON::BI__builtin_neon_vmulxh_lane_f16:
11882 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
11883 // vmulx_lane should be mapped to Neon scalar mulx after
11884 // extracting the scalar element
11885 Ops.push_back(EmitScalarExpr(E->getArg(2)));
11886 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11887 Ops.pop_back();
11888 Int = Intrinsic::aarch64_neon_fmulx;
11889 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
11891 case NEON::BI__builtin_neon_vmul_lane_v:
11892 case NEON::BI__builtin_neon_vmul_laneq_v: {
11893 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
11894 bool Quad = false;
11895 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
11896 Quad = true;
11897 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11898 llvm::FixedVectorType *VTy =
11899 GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
11900 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11901 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11902 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
11903 return Builder.CreateBitCast(Result, Ty);
11905 case NEON::BI__builtin_neon_vnegd_s64:
11906 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
11907 case NEON::BI__builtin_neon_vnegh_f16:
11908 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
11909 case NEON::BI__builtin_neon_vpmaxnm_v:
11910 case NEON::BI__builtin_neon_vpmaxnmq_v: {
11911 Int = Intrinsic::aarch64_neon_fmaxnmp;
11912 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
11914 case NEON::BI__builtin_neon_vpminnm_v:
11915 case NEON::BI__builtin_neon_vpminnmq_v: {
11916 Int = Intrinsic::aarch64_neon_fminnmp;
11917 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
11919 case NEON::BI__builtin_neon_vsqrth_f16: {
11920 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11921 Int = Builder.getIsFPConstrained()
11922 ? Intrinsic::experimental_constrained_sqrt
11923 : Intrinsic::sqrt;
11924 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
11926 case NEON::BI__builtin_neon_vsqrt_v:
11927 case NEON::BI__builtin_neon_vsqrtq_v: {
11928 Int = Builder.getIsFPConstrained()
11929 ? Intrinsic::experimental_constrained_sqrt
11930 : Intrinsic::sqrt;
11931 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11932 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
11934 case NEON::BI__builtin_neon_vrbit_v:
11935 case NEON::BI__builtin_neon_vrbitq_v: {
11936 Int = Intrinsic::bitreverse;
11937 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
11939 case NEON::BI__builtin_neon_vaddv_u8:
11940 // FIXME: These are handled by the AArch64 scalar code.
11941 usgn = true;
11942 [[fallthrough]];
11943 case NEON::BI__builtin_neon_vaddv_s8: {
11944 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11945 Ty = Int32Ty;
11946 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11947 llvm::Type *Tys[2] = { Ty, VTy };
11948 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11949 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11950 return Builder.CreateTrunc(Ops[0], Int8Ty);
11952 case NEON::BI__builtin_neon_vaddv_u16:
11953 usgn = true;
11954 [[fallthrough]];
11955 case NEON::BI__builtin_neon_vaddv_s16: {
11956 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11957 Ty = Int32Ty;
11958 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11959 llvm::Type *Tys[2] = { Ty, VTy };
11960 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11961 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11962 return Builder.CreateTrunc(Ops[0], Int16Ty);
11964 case NEON::BI__builtin_neon_vaddvq_u8:
11965 usgn = true;
11966 [[fallthrough]];
11967 case NEON::BI__builtin_neon_vaddvq_s8: {
11968 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11969 Ty = Int32Ty;
11970 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11971 llvm::Type *Tys[2] = { Ty, VTy };
11972 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11973 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11974 return Builder.CreateTrunc(Ops[0], Int8Ty);
11976 case NEON::BI__builtin_neon_vaddvq_u16:
11977 usgn = true;
11978 [[fallthrough]];
11979 case NEON::BI__builtin_neon_vaddvq_s16: {
11980 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11981 Ty = Int32Ty;
11982 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11983 llvm::Type *Tys[2] = { Ty, VTy };
11984 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11985 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11986 return Builder.CreateTrunc(Ops[0], Int16Ty);
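  // Illustrative note (not part of the original source): the vaddv/vaddvq cases
  // above follow a common pattern -- call the AArch64 add-across-lanes intrinsic
  // at i32 width and truncate back to the element type. A sketch, assuming a
  // source-level call such as
  //   uint8x8_t v = ...;
  //   uint8_t r = vaddv_u8(v);
  // this lowers to roughly
  //   %sum = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %v)
  //   %r   = trunc i32 %sum to i8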
11988 case NEON::BI__builtin_neon_vmaxv_u8: {
11989 Int = Intrinsic::aarch64_neon_umaxv;
11990 Ty = Int32Ty;
11991 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11992 llvm::Type *Tys[2] = { Ty, VTy };
11993 Ops.push_back(EmitScalarExpr(E->getArg(0)));
11994 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11995 return Builder.CreateTrunc(Ops[0], Int8Ty);
11997 case NEON::BI__builtin_neon_vmaxv_u16: {
11998 Int = Intrinsic::aarch64_neon_umaxv;
11999 Ty = Int32Ty;
12000 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12001 llvm::Type *Tys[2] = { Ty, VTy };
12002 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12003 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12004 return Builder.CreateTrunc(Ops[0], Int16Ty);
12006 case NEON::BI__builtin_neon_vmaxvq_u8: {
12007 Int = Intrinsic::aarch64_neon_umaxv;
12008 Ty = Int32Ty;
12009 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12010 llvm::Type *Tys[2] = { Ty, VTy };
12011 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12012 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12013 return Builder.CreateTrunc(Ops[0], Int8Ty);
12015 case NEON::BI__builtin_neon_vmaxvq_u16: {
12016 Int = Intrinsic::aarch64_neon_umaxv;
12017 Ty = Int32Ty;
12018 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12019 llvm::Type *Tys[2] = { Ty, VTy };
12020 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12021 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12022 return Builder.CreateTrunc(Ops[0], Int16Ty);
12024 case NEON::BI__builtin_neon_vmaxv_s8: {
12025 Int = Intrinsic::aarch64_neon_smaxv;
12026 Ty = Int32Ty;
12027 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12028 llvm::Type *Tys[2] = { Ty, VTy };
12029 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12030 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12031 return Builder.CreateTrunc(Ops[0], Int8Ty);
12033 case NEON::BI__builtin_neon_vmaxv_s16: {
12034 Int = Intrinsic::aarch64_neon_smaxv;
12035 Ty = Int32Ty;
12036 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12037 llvm::Type *Tys[2] = { Ty, VTy };
12038 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12039 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12040 return Builder.CreateTrunc(Ops[0], Int16Ty);
12042 case NEON::BI__builtin_neon_vmaxvq_s8: {
12043 Int = Intrinsic::aarch64_neon_smaxv;
12044 Ty = Int32Ty;
12045 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12046 llvm::Type *Tys[2] = { Ty, VTy };
12047 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12048 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12049 return Builder.CreateTrunc(Ops[0], Int8Ty);
12051 case NEON::BI__builtin_neon_vmaxvq_s16: {
12052 Int = Intrinsic::aarch64_neon_smaxv;
12053 Ty = Int32Ty;
12054 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12055 llvm::Type *Tys[2] = { Ty, VTy };
12056 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12057 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12058 return Builder.CreateTrunc(Ops[0], Int16Ty);
12060 case NEON::BI__builtin_neon_vmaxv_f16: {
12061 Int = Intrinsic::aarch64_neon_fmaxv;
12062 Ty = HalfTy;
12063 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12064 llvm::Type *Tys[2] = { Ty, VTy };
12065 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12066 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12067 return Builder.CreateTrunc(Ops[0], HalfTy);
12069 case NEON::BI__builtin_neon_vmaxvq_f16: {
12070 Int = Intrinsic::aarch64_neon_fmaxv;
12071 Ty = HalfTy;
12072 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12073 llvm::Type *Tys[2] = { Ty, VTy };
12074 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12075 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
12076 return Builder.CreateTrunc(Ops[0], HalfTy);
12078 case NEON::BI__builtin_neon_vminv_u8: {
12079 Int = Intrinsic::aarch64_neon_uminv;
12080 Ty = Int32Ty;
12081 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12082 llvm::Type *Tys[2] = { Ty, VTy };
12083 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12084 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12085 return Builder.CreateTrunc(Ops[0], Int8Ty);
12087 case NEON::BI__builtin_neon_vminv_u16: {
12088 Int = Intrinsic::aarch64_neon_uminv;
12089 Ty = Int32Ty;
12090 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12091 llvm::Type *Tys[2] = { Ty, VTy };
12092 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12093 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12094 return Builder.CreateTrunc(Ops[0], Int16Ty);
12096 case NEON::BI__builtin_neon_vminvq_u8: {
12097 Int = Intrinsic::aarch64_neon_uminv;
12098 Ty = Int32Ty;
12099 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12100 llvm::Type *Tys[2] = { Ty, VTy };
12101 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12102 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12103 return Builder.CreateTrunc(Ops[0], Int8Ty);
12105 case NEON::BI__builtin_neon_vminvq_u16: {
12106 Int = Intrinsic::aarch64_neon_uminv;
12107 Ty = Int32Ty;
12108 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12109 llvm::Type *Tys[2] = { Ty, VTy };
12110 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12111 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12112 return Builder.CreateTrunc(Ops[0], Int16Ty);
12114 case NEON::BI__builtin_neon_vminv_s8: {
12115 Int = Intrinsic::aarch64_neon_sminv;
12116 Ty = Int32Ty;
12117 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12118 llvm::Type *Tys[2] = { Ty, VTy };
12119 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12120 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12121 return Builder.CreateTrunc(Ops[0], Int8Ty);
12123 case NEON::BI__builtin_neon_vminv_s16: {
12124 Int = Intrinsic::aarch64_neon_sminv;
12125 Ty = Int32Ty;
12126 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12127 llvm::Type *Tys[2] = { Ty, VTy };
12128 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12129 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12130 return Builder.CreateTrunc(Ops[0], Int16Ty);
12132 case NEON::BI__builtin_neon_vminvq_s8: {
12133 Int = Intrinsic::aarch64_neon_sminv;
12134 Ty = Int32Ty;
12135 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12136 llvm::Type *Tys[2] = { Ty, VTy };
12137 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12138 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12139 return Builder.CreateTrunc(Ops[0], Int8Ty);
12141 case NEON::BI__builtin_neon_vminvq_s16: {
12142 Int = Intrinsic::aarch64_neon_sminv;
12143 Ty = Int32Ty;
12144 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12145 llvm::Type *Tys[2] = { Ty, VTy };
12146 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12147 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12148 return Builder.CreateTrunc(Ops[0], Int16Ty);
12150 case NEON::BI__builtin_neon_vminv_f16: {
12151 Int = Intrinsic::aarch64_neon_fminv;
12152 Ty = HalfTy;
12153 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12154 llvm::Type *Tys[2] = { Ty, VTy };
12155 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12156 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12157 return Builder.CreateTrunc(Ops[0], HalfTy);
12159 case NEON::BI__builtin_neon_vminvq_f16: {
12160 Int = Intrinsic::aarch64_neon_fminv;
12161 Ty = HalfTy;
12162 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12163 llvm::Type *Tys[2] = { Ty, VTy };
12164 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12165 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
12166 return Builder.CreateTrunc(Ops[0], HalfTy);
12168 case NEON::BI__builtin_neon_vmaxnmv_f16: {
12169 Int = Intrinsic::aarch64_neon_fmaxnmv;
12170 Ty = HalfTy;
12171 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12172 llvm::Type *Tys[2] = { Ty, VTy };
12173 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12174 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
12175 return Builder.CreateTrunc(Ops[0], HalfTy);
12177 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
12178 Int = Intrinsic::aarch64_neon_fmaxnmv;
12179 Ty = HalfTy;
12180 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12181 llvm::Type *Tys[2] = { Ty, VTy };
12182 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12183 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
12184 return Builder.CreateTrunc(Ops[0], HalfTy);
12186 case NEON::BI__builtin_neon_vminnmv_f16: {
12187 Int = Intrinsic::aarch64_neon_fminnmv;
12188 Ty = HalfTy;
12189 VTy = llvm::FixedVectorType::get(HalfTy, 4);
12190 llvm::Type *Tys[2] = { Ty, VTy };
12191 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12192 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
12193 return Builder.CreateTrunc(Ops[0], HalfTy);
12195 case NEON::BI__builtin_neon_vminnmvq_f16: {
12196 Int = Intrinsic::aarch64_neon_fminnmv;
12197 Ty = HalfTy;
12198 VTy = llvm::FixedVectorType::get(HalfTy, 8);
12199 llvm::Type *Tys[2] = { Ty, VTy };
12200 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12201 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
12202 return Builder.CreateTrunc(Ops[0], HalfTy);
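  // Illustrative note (not part of the original source): vmaxv/vminv on f16 use
  // the plain fmaxv/fminv reductions, while vmaxnmv/vminnmv use fmaxnmv/fminnmv,
  // which follow the FMAXNM/FMINNM "prefer a number over a quiet NaN" semantics.
  // The intrinsics are instantiated at {f16, <N x f16>}, so the result is
  // already a scalar half and the final CreateTrunc is effectively a no-op cast.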
12204 case NEON::BI__builtin_neon_vmul_n_f64: {
12205 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
12206 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
12207 return Builder.CreateFMul(Ops[0], RHS);
12209 case NEON::BI__builtin_neon_vaddlv_u8: {
12210 Int = Intrinsic::aarch64_neon_uaddlv;
12211 Ty = Int32Ty;
12212 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12213 llvm::Type *Tys[2] = { Ty, VTy };
12214 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12215 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12216 return Builder.CreateTrunc(Ops[0], Int16Ty);
12218 case NEON::BI__builtin_neon_vaddlv_u16: {
12219 Int = Intrinsic::aarch64_neon_uaddlv;
12220 Ty = Int32Ty;
12221 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12222 llvm::Type *Tys[2] = { Ty, VTy };
12223 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12224 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12226 case NEON::BI__builtin_neon_vaddlvq_u8: {
12227 Int = Intrinsic::aarch64_neon_uaddlv;
12228 Ty = Int32Ty;
12229 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12230 llvm::Type *Tys[2] = { Ty, VTy };
12231 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12232 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12233 return Builder.CreateTrunc(Ops[0], Int16Ty);
12235 case NEON::BI__builtin_neon_vaddlvq_u16: {
12236 Int = Intrinsic::aarch64_neon_uaddlv;
12237 Ty = Int32Ty;
12238 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12239 llvm::Type *Tys[2] = { Ty, VTy };
12240 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12241 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12243 case NEON::BI__builtin_neon_vaddlv_s8: {
12244 Int = Intrinsic::aarch64_neon_saddlv;
12245 Ty = Int32Ty;
12246 VTy = llvm::FixedVectorType::get(Int8Ty, 8);
12247 llvm::Type *Tys[2] = { Ty, VTy };
12248 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12249 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12250 return Builder.CreateTrunc(Ops[0], Int16Ty);
12252 case NEON::BI__builtin_neon_vaddlv_s16: {
12253 Int = Intrinsic::aarch64_neon_saddlv;
12254 Ty = Int32Ty;
12255 VTy = llvm::FixedVectorType::get(Int16Ty, 4);
12256 llvm::Type *Tys[2] = { Ty, VTy };
12257 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12258 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12260 case NEON::BI__builtin_neon_vaddlvq_s8: {
12261 Int = Intrinsic::aarch64_neon_saddlv;
12262 Ty = Int32Ty;
12263 VTy = llvm::FixedVectorType::get(Int8Ty, 16);
12264 llvm::Type *Tys[2] = { Ty, VTy };
12265 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12266 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
12267 return Builder.CreateTrunc(Ops[0], Int16Ty);
12269 case NEON::BI__builtin_neon_vaddlvq_s16: {
12270 Int = Intrinsic::aarch64_neon_saddlv;
12271 Ty = Int32Ty;
12272 VTy = llvm::FixedVectorType::get(Int16Ty, 8);
12273 llvm::Type *Tys[2] = { Ty, VTy };
12274 Ops.push_back(EmitScalarExpr(E->getArg(0)));
12275 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
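  // Illustrative note (not part of the original source): vaddlv/vaddlvq are the
  // widening add-across-lanes forms. The uaddlv/saddlv intrinsics are always
  // instantiated with an i32 result; for the 8-bit element variants the ACLE
  // result type is only 16 bits wide, hence the extra trunc to i16, e.g.
  //   uint16_t r = vaddlv_u8(v);   // v : uint8x8_t
  //   %sum = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %v)
  //   %r   = trunc i32 %sum to i16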
12277 case NEON::BI__builtin_neon_vsri_n_v:
12278 case NEON::BI__builtin_neon_vsriq_n_v: {
12279 Int = Intrinsic::aarch64_neon_vsri;
12280 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
12281 return EmitNeonCall(Intrin, Ops, "vsri_n");
12283 case NEON::BI__builtin_neon_vsli_n_v:
12284 case NEON::BI__builtin_neon_vsliq_n_v: {
12285 Int = Intrinsic::aarch64_neon_vsli;
12286 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
12287 return EmitNeonCall(Intrin, Ops, "vsli_n");
12289 case NEON::BI__builtin_neon_vsra_n_v:
12290 case NEON::BI__builtin_neon_vsraq_n_v:
12291 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
12292 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
12293 return Builder.CreateAdd(Ops[0], Ops[1]);
12294 case NEON::BI__builtin_neon_vrsra_n_v:
12295 case NEON::BI__builtin_neon_vrsraq_n_v: {
12296 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
12297 SmallVector<llvm::Value*,2> TmpOps;
12298 TmpOps.push_back(Ops[1]);
12299 TmpOps.push_back(Ops[2]);
12300 Function* F = CGM.getIntrinsic(Int, Ty);
12301 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
12302 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
12303 return Builder.CreateAdd(Ops[0], tmp);
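  // Illustrative note (not part of the original source): vrsra_n (rounding
  // shift right and accumulate) has no dedicated intrinsic here. The rounding
  // shift is emitted as urshl/srshl, with EmitNeonCall negating the shift
  // immediate (the trailing rightshift=true argument) so that a rounding left
  // shift by -N acts as a rounding right shift by N; the shifted value is then
  // added to the accumulator in Ops[0].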
12305 case NEON::BI__builtin_neon_vld1_v:
12306 case NEON::BI__builtin_neon_vld1q_v: {
12307 return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
12309 case NEON::BI__builtin_neon_vst1_v:
12310 case NEON::BI__builtin_neon_vst1q_v:
12311 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
12312 return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
12313 case NEON::BI__builtin_neon_vld1_lane_v:
12314 case NEON::BI__builtin_neon_vld1q_lane_v: {
12315 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12316 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
12317 PtrOp0.getAlignment());
12318 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
12320 case NEON::BI__builtin_neon_vldap1_lane_s64:
12321 case NEON::BI__builtin_neon_vldap1q_lane_s64: {
12322 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12323 llvm::LoadInst *LI = Builder.CreateAlignedLoad(
12324 VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
12325 LI->setAtomic(llvm::AtomicOrdering::Acquire);
12326 Ops[0] = LI;
12327 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
12329 case NEON::BI__builtin_neon_vld1_dup_v:
12330 case NEON::BI__builtin_neon_vld1q_dup_v: {
12331 Value *V = PoisonValue::get(Ty);
12332 Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
12333 PtrOp0.getAlignment());
12334 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
12335 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
12336 return EmitNeonSplat(Ops[0], CI);
12338 case NEON::BI__builtin_neon_vst1_lane_v:
12339 case NEON::BI__builtin_neon_vst1q_lane_v:
12340 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12341 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
12342 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
12343 return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
12344 PtrOp0.getAlignment());
12345 case NEON::BI__builtin_neon_vstl1_lane_s64:
12346 case NEON::BI__builtin_neon_vstl1q_lane_s64: {
12347 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12348 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
12349 llvm::StoreInst *SI =
12350 Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
12351 SI->setAtomic(llvm::AtomicOrdering::Release);
12352 return SI;
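  // Illustrative note (not part of the original source): the vldap1(q)_lane and
  // vstl1(q)_lane cases above are the load-acquire/store-release lane forms
  // (FEAT_LRCPC3). They reuse the ordinary lane load/store lowering but mark
  // the scalar element access as atomic with acquire (load) or release (store)
  // ordering.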
12354 case NEON::BI__builtin_neon_vld2_v:
12355 case NEON::BI__builtin_neon_vld2q_v: {
12356 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12357 llvm::Type *Tys[2] = { VTy, PTy };
12358 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
12359 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
12360 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12362 case NEON::BI__builtin_neon_vld3_v:
12363 case NEON::BI__builtin_neon_vld3q_v: {
12364 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12365 llvm::Type *Tys[2] = { VTy, PTy };
12366 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
12367 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
12368 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12370 case NEON::BI__builtin_neon_vld4_v:
12371 case NEON::BI__builtin_neon_vld4q_v: {
12372 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12373 llvm::Type *Tys[2] = { VTy, PTy };
12374 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
12375 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
12376 Ops[0] = Builder.CreateBitCast(Ops[0],
12377 llvm::PointerType::getUnqual(Ops[1]->getType()));
12378 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
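  // Illustrative note (not part of the original source): the aarch64.neon.ld2/
  // ld3/ld4 intrinsics return a literal struct of 2/3/4 vectors. The builtin's
  // first operand is a pointer to the result tuple (e.g. a uint8x8x4_t), so the
  // whole returned struct is simply stored through Ops[0].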
12380 case NEON::BI__builtin_neon_vld2_dup_v:
12381 case NEON::BI__builtin_neon_vld2q_dup_v: {
12382 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12383 llvm::Type *Tys[2] = { VTy, PTy };
12384 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
12385 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
12386 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12388 case NEON::BI__builtin_neon_vld3_dup_v:
12389 case NEON::BI__builtin_neon_vld3q_dup_v: {
12390 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12391 llvm::Type *Tys[2] = { VTy, PTy };
12392 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
12393 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
12394 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12396 case NEON::BI__builtin_neon_vld4_dup_v:
12397 case NEON::BI__builtin_neon_vld4q_dup_v: {
12398 llvm::Type *PTy = llvm::PointerType::getUnqual(getLLVMContext());
12399 llvm::Type *Tys[2] = { VTy, PTy };
12400 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
12401 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
12402 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12404 case NEON::BI__builtin_neon_vld2_lane_v:
12405 case NEON::BI__builtin_neon_vld2q_lane_v: {
12406 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12407 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
12408 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12409 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12410 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12411 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
12412 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
12413 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12415 case NEON::BI__builtin_neon_vld3_lane_v:
12416 case NEON::BI__builtin_neon_vld3q_lane_v: {
12417 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12418 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
12419 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12420 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12421 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12422 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
12423 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
12424 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
12425 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12427 case NEON::BI__builtin_neon_vld4_lane_v:
12428 case NEON::BI__builtin_neon_vld4q_lane_v: {
12429 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
12430 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
12431 std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
12432 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12433 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12434 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
12435 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
12436 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
12437 Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
12438 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
12440 case NEON::BI__builtin_neon_vst2_v:
12441 case NEON::BI__builtin_neon_vst2q_v: {
12442 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12443 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
12444 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
12445 Ops, "");
12447 case NEON::BI__builtin_neon_vst2_lane_v:
12448 case NEON::BI__builtin_neon_vst2q_lane_v: {
12449 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12450 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
12451 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
12452 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
12453 Ops, "");
12455 case NEON::BI__builtin_neon_vst3_v:
12456 case NEON::BI__builtin_neon_vst3q_v: {
12457 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12458 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
12459 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
12460 Ops, "");
12462 case NEON::BI__builtin_neon_vst3_lane_v:
12463 case NEON::BI__builtin_neon_vst3q_lane_v: {
12464 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12465 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
12466 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
12467 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
12468 Ops, "");
12470 case NEON::BI__builtin_neon_vst4_v:
12471 case NEON::BI__builtin_neon_vst4q_v: {
12472 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12473 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
12474 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
12475 Ops, "");
12477 case NEON::BI__builtin_neon_vst4_lane_v:
12478 case NEON::BI__builtin_neon_vst4q_lane_v: {
12479 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
12480 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
12481 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
12482 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
12483 Ops, "");
12485 case NEON::BI__builtin_neon_vtrn_v:
12486 case NEON::BI__builtin_neon_vtrnq_v: {
12487 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12488 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12489 Value *SV = nullptr;
12491 for (unsigned vi = 0; vi != 2; ++vi) {
12492 SmallVector<int, 16> Indices;
12493 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
12494 Indices.push_back(i+vi);
12495 Indices.push_back(i+e+vi);
12497 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12498 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
12499 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12501 return SV;
12503 case NEON::BI__builtin_neon_vuzp_v:
12504 case NEON::BI__builtin_neon_vuzpq_v: {
12505 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12506 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12507 Value *SV = nullptr;
12509 for (unsigned vi = 0; vi != 2; ++vi) {
12510 SmallVector<int, 16> Indices;
12511 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
12512 Indices.push_back(2*i+vi);
12514 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12515 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
12516 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12518 return SV;
12520 case NEON::BI__builtin_neon_vzip_v:
12521 case NEON::BI__builtin_neon_vzipq_v: {
12522 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
12523 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
12524 Value *SV = nullptr;
12526 for (unsigned vi = 0; vi != 2; ++vi) {
12527 SmallVector<int, 16> Indices;
12528 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
12529 Indices.push_back((i + vi*e) >> 1);
12530 Indices.push_back(((i + vi*e) >> 1)+e);
12532 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
12533 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
12534 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
12536 return SV;
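  // Illustrative note (not part of the original source): vtrn/vuzp/vzip return
  // a pair of vectors, so each case above emits two shufflevectors and stores
  // them into consecutive slots of the result tuple pointed to by Ops[0]. For a
  // 4-element vector the shuffle masks come out roughly as:
  //   vtrn: {0,4,2,6} and {1,5,3,7}
  //   vuzp: {0,2,4,6} and {1,3,5,7}
  //   vzip: {0,4,1,5} and {2,6,3,7}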
12538 case NEON::BI__builtin_neon_vqtbl1q_v: {
12539 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
12540 Ops, "vtbl1");
12542 case NEON::BI__builtin_neon_vqtbl2q_v: {
12543 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
12544 Ops, "vtbl2");
12546 case NEON::BI__builtin_neon_vqtbl3q_v: {
12547 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
12548 Ops, "vtbl3");
12550 case NEON::BI__builtin_neon_vqtbl4q_v: {
12551 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
12552 Ops, "vtbl4");
12554 case NEON::BI__builtin_neon_vqtbx1q_v: {
12555 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
12556 Ops, "vtbx1");
12558 case NEON::BI__builtin_neon_vqtbx2q_v: {
12559 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
12560 Ops, "vtbx2");
12562 case NEON::BI__builtin_neon_vqtbx3q_v: {
12563 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
12564 Ops, "vtbx3");
12566 case NEON::BI__builtin_neon_vqtbx4q_v: {
12567 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
12568 Ops, "vtbx4");
12570 case NEON::BI__builtin_neon_vsqadd_v:
12571 case NEON::BI__builtin_neon_vsqaddq_v: {
12572 Int = Intrinsic::aarch64_neon_usqadd;
12573 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
12575 case NEON::BI__builtin_neon_vuqadd_v:
12576 case NEON::BI__builtin_neon_vuqaddq_v: {
12577 Int = Intrinsic::aarch64_neon_suqadd;
12578 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
12583 Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
12584 const CallExpr *E) {
12585 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
12586 BuiltinID == BPF::BI__builtin_btf_type_id ||
12587 BuiltinID == BPF::BI__builtin_preserve_type_info ||
12588 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
12589 "unexpected BPF builtin");
12591 // A sequence number, injected into IR builtin call sites, to prevent CSE
12592 // when the only difference between otherwise-identical calls is their
12593 // debuginfo metadata.
12594 static uint32_t BuiltinSeqNum;
12596 switch (BuiltinID) {
12597 default:
12598 llvm_unreachable("Unexpected BPF builtin");
12599 case BPF::BI__builtin_preserve_field_info: {
12600 const Expr *Arg = E->getArg(0);
12601 bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
12603 if (!getDebugInfo()) {
12604 CGM.Error(E->getExprLoc(),
12605 "using __builtin_preserve_field_info() without -g");
12606 return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
12607 : EmitLValue(Arg).getPointer(*this);
12610 // Enable underlying preserve_*_access_index() generation.
12611 bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
12612 IsInPreservedAIRegion = true;
12613 Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
12614 : EmitLValue(Arg).getPointer(*this);
12615 IsInPreservedAIRegion = OldIsInPreservedAIRegion;
12617 ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
12618 Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
12620 // Build the IR for the preserve_field_info intrinsic.
12621 llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
12622 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
12623 {FieldAddr->getType()});
12624 return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
12626 case BPF::BI__builtin_btf_type_id:
12627 case BPF::BI__builtin_preserve_type_info: {
12628 if (!getDebugInfo()) {
12629 CGM.Error(E->getExprLoc(), "using builtin function without -g");
12630 return nullptr;
12633 const Expr *Arg0 = E->getArg(0);
12634 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
12635 Arg0->getType(), Arg0->getExprLoc());
12637 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
12638 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
12639 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
12641 llvm::Function *FnDecl;
12642 if (BuiltinID == BPF::BI__builtin_btf_type_id)
12643 FnDecl = llvm::Intrinsic::getDeclaration(
12644 &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
12645 else
12646 FnDecl = llvm::Intrinsic::getDeclaration(
12647 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
12648 CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
12649 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
12650 return Fn;
12652 case BPF::BI__builtin_preserve_enum_value: {
12653 if (!getDebugInfo()) {
12654 CGM.Error(E->getExprLoc(), "using builtin function without -g");
12655 return nullptr;
12658 const Expr *Arg0 = E->getArg(0);
12659 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
12660 Arg0->getType(), Arg0->getExprLoc());
12662 // Find enumerator
12663 const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
12664 const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
12665 const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
12666 const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
12668 auto &InitVal = Enumerator->getInitVal();
12669 std::string InitValStr;
12670 if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
12671 InitValStr = std::to_string(InitVal.getSExtValue());
12672 else
12673 InitValStr = std::to_string(InitVal.getZExtValue());
12674 std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
12675 Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
12677 ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
12678 Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
12679 Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
12681 llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
12682 &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
12683 CallInst *Fn =
12684 Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
12685 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
12686 return Fn;
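  // Illustrative note (not part of the original source): the enumerator is
  // encoded as a "Name:Value" string (a hypothetical enumerator FOO = 7 would
  // be encoded as "FOO:7") and handed to bpf_preserve_enum_value, presumably so
  // the value can be re-resolved against the target kernel's BTF at load time.
  // The per-call sequence number keeps otherwise-identical calls from being
  // merged by CSE when they differ only in the attached debuginfo.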
12691 llvm::Value *CodeGenFunction::
12692 BuildVector(ArrayRef<llvm::Value*> Ops) {
12693 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
12694 "Not a power-of-two sized vector!");
12695 bool AllConstants = true;
12696 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
12697 AllConstants &= isa<Constant>(Ops[i]);
12699 // If this is a constant vector, create a ConstantVector.
12700 if (AllConstants) {
12701 SmallVector<llvm::Constant*, 16> CstOps;
12702 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
12703 CstOps.push_back(cast<Constant>(Ops[i]));
12704 return llvm::ConstantVector::get(CstOps);
12707 // Otherwise, insertelement the values to build the vector.
12708 Value *Result = llvm::PoisonValue::get(
12709 llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
12711 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
12712 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt64(i));
12714 return Result;
12717 // Convert the mask from an integer type to a vector of i1.
12718 static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
12719 unsigned NumElts) {
12721 auto *MaskTy = llvm::FixedVectorType::get(
12722 CGF.Builder.getInt1Ty(),
12723 cast<IntegerType>(Mask->getType())->getBitWidth());
12724 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
12726 // If we have fewer than 8 elements, then the starting mask was an i8 and
12727 // we need to extract down to the right number of elements.
12728 if (NumElts < 8) {
12729 int Indices[4];
12730 for (unsigned i = 0; i != NumElts; ++i)
12731 Indices[i] = i;
12732 MaskVec = CGF.Builder.CreateShuffleVector(
12733 MaskVec, MaskVec, ArrayRef(Indices, NumElts), "extract");
12735 return MaskVec;
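// Illustrative note (not part of the original source): an AVX-512 mask argument
// arrives as an integer (__mmask8/16/32/64). It is bitcast to an i1 vector of
// the same bit width and, when the destination vector has fewer than 8
// elements, narrowed with a shuffle. E.g. for a 4-element operation:
//   %m  = bitcast i8 %mask to <8 x i1>
//   %m4 = shufflevector <8 x i1> %m, <8 x i1> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>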
12738 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
12739 Align Alignment) {
12740 Value *Ptr = Ops[0];
12742 Value *MaskVec = getMaskVecValue(
12743 CGF, Ops[2],
12744 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
12746 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
12749 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
12750 Align Alignment) {
12751 llvm::Type *Ty = Ops[1]->getType();
12752 Value *Ptr = Ops[0];
12754 Value *MaskVec = getMaskVecValue(
12755 CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
12757 return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
12760 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
12761 ArrayRef<Value *> Ops) {
12762 auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
12763 Value *Ptr = Ops[0];
12765 Value *MaskVec = getMaskVecValue(
12766 CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
12768 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
12769 ResultTy);
12770 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
12773 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
12774 ArrayRef<Value *> Ops,
12775 bool IsCompress) {
12776 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
12778 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
12780 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
12781 : Intrinsic::x86_avx512_mask_expand;
12782 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
12783 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
12786 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
12787 ArrayRef<Value *> Ops) {
12788 auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
12789 Value *Ptr = Ops[0];
12791 Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
12793 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
12794 ResultTy);
12795 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
12798 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
12799 ArrayRef<Value *> Ops,
12800 bool InvertLHS = false) {
12801 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
12802 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
12803 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
12805 if (InvertLHS)
12806 LHS = CGF.Builder.CreateNot(LHS);
12808 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
12809 Ops[0]->getType());
12812 static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
12813 Value *Amt, bool IsRight) {
12814 llvm::Type *Ty = Op0->getType();
12816 // The amount may be a scalar immediate, in which case we create a splat vector.
12817 // Funnel shift amounts are treated modulo the bit width, and the types are all
12818 // powers of two, so we only care about the lowest log2(bit-width) bits anyway.
12819 if (Amt->getType() != Ty) {
12820 unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
12821 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
12822 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
12825 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
12826 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
12827 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
12830 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
12831 bool IsSigned) {
12832 Value *Op0 = Ops[0];
12833 Value *Op1 = Ops[1];
12834 llvm::Type *Ty = Op0->getType();
12835 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
12837 CmpInst::Predicate Pred;
12838 switch (Imm) {
12839 case 0x0:
12840 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
12841 break;
12842 case 0x1:
12843 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
12844 break;
12845 case 0x2:
12846 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
12847 break;
12848 case 0x3:
12849 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
12850 break;
12851 case 0x4:
12852 Pred = ICmpInst::ICMP_EQ;
12853 break;
12854 case 0x5:
12855 Pred = ICmpInst::ICMP_NE;
12856 break;
12857 case 0x6:
12858 return llvm::Constant::getNullValue(Ty); // FALSE
12859 case 0x7:
12860 return llvm::Constant::getAllOnesValue(Ty); // TRUE
12861 default:
12862 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
12865 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
12866 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
12867 return Res;
12870 static Value *EmitX86Select(CodeGenFunction &CGF,
12871 Value *Mask, Value *Op0, Value *Op1) {
12873 // If the mask is all ones, just return the first argument.
12874 if (const auto *C = dyn_cast<Constant>(Mask))
12875 if (C->isAllOnesValue())
12876 return Op0;
12878 Mask = getMaskVecValue(
12879 CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
12881 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12884 static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
12885 Value *Mask, Value *Op0, Value *Op1) {
12886 // If the mask is all ones, just return the first argument.
12887 if (const auto *C = dyn_cast<Constant>(Mask))
12888 if (C->isAllOnesValue())
12889 return Op0;
12891 auto *MaskTy = llvm::FixedVectorType::get(
12892 CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
12893 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
12894 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
12895 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12898 static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
12899 unsigned NumElts, Value *MaskIn) {
12900 if (MaskIn) {
12901 const auto *C = dyn_cast<Constant>(MaskIn);
12902 if (!C || !C->isAllOnesValue())
12903 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
12906 if (NumElts < 8) {
12907 int Indices[8];
12908 for (unsigned i = 0; i != NumElts; ++i)
12909 Indices[i] = i;
12910 for (unsigned i = NumElts; i != 8; ++i)
12911 Indices[i] = i % NumElts + NumElts;
12912 Cmp = CGF.Builder.CreateShuffleVector(
12913 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
12916 return CGF.Builder.CreateBitCast(Cmp,
12917 IntegerType::get(CGF.getLLVMContext(),
12918 std::max(NumElts, 8U)));
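// Illustrative note (not part of the original source): AVX-512 compare builtins
// return an integer mask that is at least 8 bits wide (__mmask8). A compare
// producing fewer than 8 i1 lanes is therefore widened first -- the low lanes
// hold the compare result and the remaining lanes are taken from a zero
// vector -- and the widened <8 x i1> is then bitcast to i8.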
12921 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
12922 bool Signed, ArrayRef<Value *> Ops) {
12923 assert((Ops.size() == 2 || Ops.size() == 4) &&
12924 "Unexpected number of arguments");
12925 unsigned NumElts =
12926 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12927 Value *Cmp;
12929 if (CC == 3) {
12930 Cmp = Constant::getNullValue(
12931 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12932 } else if (CC == 7) {
12933 Cmp = Constant::getAllOnesValue(
12934 llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12935 } else {
12936 ICmpInst::Predicate Pred;
12937 switch (CC) {
12938 default: llvm_unreachable("Unknown condition code");
12939 case 0: Pred = ICmpInst::ICMP_EQ; break;
12940 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
12941 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
12942 case 4: Pred = ICmpInst::ICMP_NE; break;
12943 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
12944 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
12946 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
12949 Value *MaskIn = nullptr;
12950 if (Ops.size() == 4)
12951 MaskIn = Ops[3];
12953 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
12956 static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
12957 Value *Zero = Constant::getNullValue(In->getType());
12958 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
12961 static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
12962 ArrayRef<Value *> Ops, bool IsSigned) {
12963 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
12964 llvm::Type *Ty = Ops[1]->getType();
12966 Value *Res;
12967 if (Rnd != 4) {
12968 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
12969 : Intrinsic::x86_avx512_uitofp_round;
12970 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
12971 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
12972 } else {
12973 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12974 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
12975 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
12978 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12981 // Lowers X86 FMA intrinsics to IR.
12982 static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12983 ArrayRef<Value *> Ops, unsigned BuiltinID,
12984 bool IsAddSub) {
12986 bool Subtract = false;
12987 Intrinsic::ID IID = Intrinsic::not_intrinsic;
12988 switch (BuiltinID) {
12989 default: break;
12990 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
12991 Subtract = true;
12992 [[fallthrough]];
12993 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
12994 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
12995 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
12996 IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
12997 break;
12998 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12999 Subtract = true;
13000 [[fallthrough]];
13001 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
13002 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
13003 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
13004 IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
13005 break;
13006 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
13007 Subtract = true;
13008 [[fallthrough]];
13009 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
13010 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
13011 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
13012 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
13013 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
13014 Subtract = true;
13015 [[fallthrough]];
13016 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
13017 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
13018 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
13019 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
13020 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
13021 Subtract = true;
13022 [[fallthrough]];
13023 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
13024 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
13025 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
13026 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
13027 break;
13028 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
13029 Subtract = true;
13030 [[fallthrough]];
13031 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
13032 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
13033 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
13034 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
13035 break;
13038 Value *A = Ops[0];
13039 Value *B = Ops[1];
13040 Value *C = Ops[2];
13042 if (Subtract)
13043 C = CGF.Builder.CreateFNeg(C);
13045 Value *Res;
13047 // Only the no-rounding (_MM_FROUND_CUR_DIRECTION/4), non-add/sub forms can use the generic llvm.fma lowering below; otherwise fall back to the target-specific intrinsic when one is available.
13048 if (IID != Intrinsic::not_intrinsic &&
13049 (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
13050 IsAddSub)) {
13051 Function *Intr = CGF.CGM.getIntrinsic(IID);
13052 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
13053 } else {
13054 llvm::Type *Ty = A->getType();
13055 Function *FMA;
13056 if (CGF.Builder.getIsFPConstrained()) {
13057 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
13058 FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
13059 Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
13060 } else {
13061 FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
13062 Res = CGF.Builder.CreateCall(FMA, {A, B, C});
13066 // Handle any required masking.
13067 Value *MaskFalseVal = nullptr;
13068 switch (BuiltinID) {
13069 case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
13070 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
13071 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
13072 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
13073 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
13074 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
13075 MaskFalseVal = Ops[0];
13076 break;
13077 case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
13078 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
13079 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
13080 case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
13081 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
13082 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
13083 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
13084 break;
13085 case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
13086 case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
13087 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
13088 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
13089 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
13090 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
13091 case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
13092 case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
13093 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
13094 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
13095 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
13096 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
13097 MaskFalseVal = Ops[2];
13098 break;
13101 if (MaskFalseVal)
13102 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
13104 return Res;
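// Illustrative note (not part of the original source): the _mask / _maskz /
// _mask3 builtin variants differ only in which value is used for the inactive
// lanes -- the first source operand, zero, or the accumulator (Ops[2]),
// respectively -- which is what the MaskFalseVal switch above selects before
// the final EmitX86Select.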
13107 static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
13108 MutableArrayRef<Value *> Ops, Value *Upper,
13109 bool ZeroMask = false, unsigned PTIdx = 0,
13110 bool NegAcc = false) {
13111 unsigned Rnd = 4;
13112 if (Ops.size() > 4)
13113 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
13115 if (NegAcc)
13116 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
13118 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
13119 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13120 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13121 Value *Res;
13122 if (Rnd != 4) {
13123 Intrinsic::ID IID;
13125 switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
13126 case 16:
13127 IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
13128 break;
13129 case 32:
13130 IID = Intrinsic::x86_avx512_vfmadd_f32;
13131 break;
13132 case 64:
13133 IID = Intrinsic::x86_avx512_vfmadd_f64;
13134 break;
13135 default:
13136 llvm_unreachable("Unexpected size");
13138 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
13139 {Ops[0], Ops[1], Ops[2], Ops[4]});
13140 } else if (CGF.Builder.getIsFPConstrained()) {
13141 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
13142 Function *FMA = CGF.CGM.getIntrinsic(
13143 Intrinsic::experimental_constrained_fma, Ops[0]->getType());
13144 Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
13145 } else {
13146 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
13147 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
13149 // If we have more than 3 arguments, we need to do masking.
13150 if (Ops.size() > 3) {
13151 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
13152 : Ops[PTIdx];
13154 // If we negated the accumulator and it is also the PassThru value, we need
13155 // to bypass the negate. Conveniently, Upper should be the same value in this
13156 // case.
13157 if (NegAcc && PTIdx == 2)
13158 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
13160 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
13162 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
13165 static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
13166 ArrayRef<Value *> Ops) {
13167 llvm::Type *Ty = Ops[0]->getType();
13168 // Arguments have a vXi32 type so cast to vXi64.
13169 Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
13170 Ty->getPrimitiveSizeInBits() / 64);
13171 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
13172 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
13174 if (IsSigned) {
13175 // Shift left then arithmetic shift right.
13176 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
13177 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
13178 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
13179 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
13180 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
13181 } else {
13182 // Clear the upper bits.
13183 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
13184 LHS = CGF.Builder.CreateAnd(LHS, Mask);
13185 RHS = CGF.Builder.CreateAnd(RHS, Mask);
13188 return CGF.Builder.CreateMul(LHS, RHS);
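// Illustrative note (not part of the original source): this lowers pmuldq /
// pmuludq-style builtins generically. The vXi32 operands are reinterpreted as
// vectors of i64, the low 32 bits of each 64-bit lane are sign- or zero-
// extended in place (shl+ashr for signed, a 0xffffffff mask for unsigned), and
// a plain 64-bit multiply then yields the full 32x32->64-bit products, e.g.
// _mm_mul_epi32(a, b) computes a[0]*b[0] and a[2]*b[2] as 64-bit results.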
13191 // Emit a masked pternlog intrinsic. This only exists because the header has to
13192 // use a macro and we aren't able to pass the input argument to a pternlog
13193 // builtin and a select builtin without evaluating it twice.
13194 static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
13195 ArrayRef<Value *> Ops) {
13196 llvm::Type *Ty = Ops[0]->getType();
13198 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
13199 unsigned EltWidth = Ty->getScalarSizeInBits();
13200 Intrinsic::ID IID;
13201 if (VecWidth == 128 && EltWidth == 32)
13202 IID = Intrinsic::x86_avx512_pternlog_d_128;
13203 else if (VecWidth == 256 && EltWidth == 32)
13204 IID = Intrinsic::x86_avx512_pternlog_d_256;
13205 else if (VecWidth == 512 && EltWidth == 32)
13206 IID = Intrinsic::x86_avx512_pternlog_d_512;
13207 else if (VecWidth == 128 && EltWidth == 64)
13208 IID = Intrinsic::x86_avx512_pternlog_q_128;
13209 else if (VecWidth == 256 && EltWidth == 64)
13210 IID = Intrinsic::x86_avx512_pternlog_q_256;
13211 else if (VecWidth == 512 && EltWidth == 64)
13212 IID = Intrinsic::x86_avx512_pternlog_q_512;
13213 else
13214 llvm_unreachable("Unexpected intrinsic");
13216 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
13217 Ops.drop_back());
13218 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
13219 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
13222 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
13223 llvm::Type *DstTy) {
13224 unsigned NumberOfElements =
13225 cast<llvm::FixedVectorType>(DstTy)->getNumElements();
13226 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
13227 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
13230 Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
13231 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
13232 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
13233 return EmitX86CpuIs(CPUStr);
13236 // Convert F16 halves to floats.
13237 static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
13238 ArrayRef<Value *> Ops,
13239 llvm::Type *DstTy) {
13240 assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
13241 "Unknown cvtph2ps intrinsic");
13243 // If the SAE intrinsic doesn't use the default rounding mode, we can't lower it to a generic fp-extension; keep the target-specific intrinsic.
13244 if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
13245 Function *F =
13246 CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
13247 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
13250 unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
13251 Value *Src = Ops[0];
13253 // Extract the subvector.
13254 if (NumDstElts !=
13255 cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
13256 assert(NumDstElts == 4 && "Unexpected vector size");
13257 Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
13260 // Bitcast from vXi16 to vXf16.
13261 auto *HalfTy = llvm::FixedVectorType::get(
13262 llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
13263 Src = CGF.Builder.CreateBitCast(Src, HalfTy);
13265 // Perform the fp-extension.
13266 Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
13268 if (Ops.size() >= 3)
13269 Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
13270 return Res;
13273 Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
13275 llvm::Type *Int32Ty = Builder.getInt32Ty();
13277 // Matching the struct layout from the compiler-rt/libgcc structure that is
13278 // filled in:
13279 // unsigned int __cpu_vendor;
13280 // unsigned int __cpu_type;
13281 // unsigned int __cpu_subtype;
13282 // unsigned int __cpu_features[1];
13283 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
13284 llvm::ArrayType::get(Int32Ty, 1));
13286 // Grab the global __cpu_model.
13287 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
13288 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
13290 // Calculate the index of the __cpu_model field to access, based on which
13291 // table the CPU string matched, and the expected value to compare against.
13292 unsigned Index;
13293 unsigned Value;
13294 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
13295 #define X86_VENDOR(ENUM, STRING) \
13296 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
13297 #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) \
13298 .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
13299 #define X86_CPU_TYPE(ENUM, STR) \
13300 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
13301 #define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS) \
13302 .Case(ALIAS, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
13303 #define X86_CPU_SUBTYPE(ENUM, STR) \
13304 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
13305 #include "llvm/TargetParser/X86TargetParser.def"
13306 .Default({0, 0});
13307 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
13309 // Grab the appropriate field from __cpu_model.
13310 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
13311 ConstantInt::get(Int32Ty, Index)};
13312 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
13313 CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
13314 CharUnits::fromQuantity(4));
13316 // Check the value of the field against the requested value.
13317 return Builder.CreateICmpEQ(CpuValue,
13318 llvm::ConstantInt::get(Int32Ty, Value));
13321 Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
13322 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
13323 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
13324 return EmitX86CpuSupports(FeatureStr);
13327 Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
13328 uint64_t Mask = llvm::X86::getCpuSupportsMask(FeatureStrs);
13329 std::array<uint32_t, 4> FeatureMask{Lo_32(Mask), Hi_32(Mask), 0, 0};
13330 return EmitX86CpuSupports(FeatureMask);
13333 llvm::Value *
13334 CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
13335 Value *Result = Builder.getTrue();
13336 if (FeatureMask[0] != 0) {
13337 // Matching the struct layout from the compiler-rt/libgcc structure that is
13338 // filled in:
13339 // unsigned int __cpu_vendor;
13340 // unsigned int __cpu_type;
13341 // unsigned int __cpu_subtype;
13342 // unsigned int __cpu_features[1];
13343 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
13344 llvm::ArrayType::get(Int32Ty, 1));
13346 // Grab the global __cpu_model.
13347 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
13348 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
13350 // Grab the first (0th) element of the __cpu_features field from the
13351 // __cpu_model global of type STy.
13352 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
13353 Builder.getInt32(0)};
13354 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
13355 Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
13356 CharUnits::fromQuantity(4));
13358 // Check the value of the bit corresponding to the feature requested.
13359 Value *Mask = Builder.getInt32(FeatureMask[0]);
13360 Value *Bitset = Builder.CreateAnd(Features, Mask);
13361 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13362 Result = Builder.CreateAnd(Result, Cmp);
13365 llvm::Type *ATy = llvm::ArrayType::get(Int32Ty, 3);
13366 llvm::Constant *CpuFeatures2 =
13367 CGM.CreateRuntimeVariable(ATy, "__cpu_features2");
13368 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
13369 for (int i = 1; i != 4; ++i) {
13370 const uint32_t M = FeatureMask[i];
13371 if (!M)
13372 continue;
13373 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)};
13374 Value *Features = Builder.CreateAlignedLoad(
13375 Int32Ty, Builder.CreateGEP(ATy, CpuFeatures2, Idxs),
13376 CharUnits::fromQuantity(4));
13377 // Check the value of the bit corresponding to the feature requested.
13378 Value *Mask = Builder.getInt32(M);
13379 Value *Bitset = Builder.CreateAnd(Features, Mask);
13380 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13381 Result = Builder.CreateAnd(Result, Cmp);
13384 return Result;
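// Illustrative sketch (assumption, not upstream code): conceptually the check
// built above is equivalent to the following C, where MaskLo holds the low 32
// feature bits and Mask1..Mask3 the remaining bits of the value returned by
// llvm::X86::getCpuSupportsMask():
//   int ok = 1;
//   ok &= (__cpu_model.__cpu_features[0] & MaskLo) == MaskLo;
//   ok &= (__cpu_features2[0] & Mask1) == Mask1;   // emitted only if Mask1 != 0
//   ok &= (__cpu_features2[1] & Mask2) == Mask2;   // emitted only if Mask2 != 0
//   ok &= (__cpu_features2[2] & Mask3) == Mask3;   // emitted only if Mask3 != 0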
13387 Value *CodeGenFunction::EmitAArch64CpuInit() {
13388 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
13389 llvm::FunctionCallee Func =
13390 CGM.CreateRuntimeFunction(FTy, "init_cpu_features_resolver");
13391 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
13392 cast<llvm::GlobalValue>(Func.getCallee())
13393 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
13394 return Builder.CreateCall(Func);
13397 Value *CodeGenFunction::EmitX86CpuInit() {
13398 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
13399 /*Variadic*/ false);
13400 llvm::FunctionCallee Func =
13401 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
13402 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
13403 cast<llvm::GlobalValue>(Func.getCallee())
13404 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
13405 return Builder.CreateCall(Func);
13408 llvm::Value *
13409 CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
13410 uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
13411 Value *Result = Builder.getTrue();
13412 if (FeaturesMask != 0) {
13413 // Get the features from the structure defined by the runtime library:
13414 // struct {
13415 // unsigned long long features;
13416 // } __aarch64_cpu_features;
13417 llvm::Type *STy = llvm::StructType::get(Int64Ty);
13418 llvm::Constant *AArch64CPUFeatures =
13419 CGM.CreateRuntimeVariable(STy, "__aarch64_cpu_features");
13420 cast<llvm::GlobalValue>(AArch64CPUFeatures)->setDSOLocal(true);
13421 llvm::Value *CpuFeatures = Builder.CreateGEP(
13422 STy, AArch64CPUFeatures,
13423 {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 0)});
13424 Value *Features = Builder.CreateAlignedLoad(Int64Ty, CpuFeatures,
13425 CharUnits::fromQuantity(8));
13426 Value *Mask = Builder.getInt64(FeaturesMask);
13427 Value *Bitset = Builder.CreateAnd(Features, Mask);
13428 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
13429 Result = Builder.CreateAnd(Result, Cmp);
13431 return Result;
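// Illustrative sketch (assumption): the check above is conceptually
//   unsigned long long f = __aarch64_cpu_features.features;
//   result = (f & FeaturesMask) == FeaturesMask;
// with FeaturesMask computed by llvm::AArch64::getCpuSupportsMask() from the
// requested feature strings.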
13434 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
13435 const CallExpr *E) {
13436 if (BuiltinID == X86::BI__builtin_cpu_is)
13437 return EmitX86CpuIs(E);
13438 if (BuiltinID == X86::BI__builtin_cpu_supports)
13439 return EmitX86CpuSupports(E);
13440 if (BuiltinID == X86::BI__builtin_cpu_init)
13441 return EmitX86CpuInit();
13443 // Handle MSVC intrinsics before argument evaluation to prevent double
13444 // evaluation.
13445 if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
13446 return EmitMSVCBuiltinExpr(*MsvcIntId, E);
13448 SmallVector<Value*, 4> Ops;
13449 bool IsMaskFCmp = false;
13450 bool IsConjFMA = false;
13452 // Find out if any arguments are required to be integer constant expressions.
13453 unsigned ICEArguments = 0;
13454 ASTContext::GetBuiltinTypeError Error;
13455 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
13456 assert(Error == ASTContext::GE_None && "Should not codegen an error");
13458 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
13459 // If this is a normal argument, just emit it as a scalar.
13460 if ((ICEArguments & (1 << i)) == 0) {
13461 Ops.push_back(EmitScalarExpr(E->getArg(i)));
13462 continue;
13465 // If this is required to be a constant, constant fold it so that we know
13466 // that the generated intrinsic gets a ConstantInt.
13467 Ops.push_back(llvm::ConstantInt::get(
13468 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
13471 // These exist so that the builtin that takes an immediate can be bounds
13472 // checked by clang to avoid passing bad immediates to the backend. Since
13473 // AVX has a larger immediate than SSE we would need separate builtins to
13474 // do the different bounds checking. Rather than create a clang-specific,
13475 // SSE-only builtin, this implements eight separate builtins to match the
13476 // gcc implementation.
13477 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
13478 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
13479 llvm::Function *F = CGM.getIntrinsic(ID);
13480 return Builder.CreateCall(F, Ops);
13483 // For the vector forms of FP comparisons, translate the builtins directly to
13484 // IR.
13485 // TODO: The builtins could be removed if the SSE header files used vector
13486 // extension comparisons directly (vector ordered/unordered may need
13487 // additional support via __builtin_isnan()).
13488 auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
13489 bool IsSignaling) {
13490 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
13491 Value *Cmp;
13492 if (IsSignaling)
13493 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
13494 else
13495 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
13496 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
13497 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
13498 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
13499 return Builder.CreateBitCast(Sext, FPVecTy);
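// Illustrative sketch (assumption): for a quiet ordered-equal comparison, the
// kind of call used for _mm_cmpeq_ps, getVectorFCmpIR(CmpInst::FCMP_OEQ,
// /*IsSignaling*/false) produces roughly
//   %cmp  = fcmp oeq <4 x float> %a, %b
//   %sext = sext <4 x i1> %cmp to <4 x i32>
//   %res  = bitcast <4 x i32> %sext to <4 x float>
// i.e. each lane becomes all-ones or all-zeros, matching the SSE semantics.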
13502 switch (BuiltinID) {
13503 default: return nullptr;
13504 case X86::BI_mm_prefetch: {
13505 Value *Address = Ops[0];
13506 ConstantInt *C = cast<ConstantInt>(Ops[1]);
13507 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
13508 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
13509 Value *Data = ConstantInt::get(Int32Ty, 1);
13510 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
13511 return Builder.CreateCall(F, {Address, RW, Locality, Data});
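// Illustrative sketch (assumption about the _MM_HINT_* encoding): with
// _MM_HINT_T0 == 3, a call like _mm_prefetch(p, _MM_HINT_T0) decodes to
// RW = (3 >> 2) & 1 = 0 (read) and Locality = 3 & 3 = 3, so the emitted IR is
// roughly
//   call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)
// where the trailing 1 selects the data cache (as opposed to the instruction
// cache).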
13513 case X86::BI_mm_clflush: {
13514 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
13515 Ops[0]);
13517 case X86::BI_mm_lfence: {
13518 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
13520 case X86::BI_mm_mfence: {
13521 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
13523 case X86::BI_mm_sfence: {
13524 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
13526 case X86::BI_mm_pause: {
13527 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
13529 case X86::BI__rdtsc: {
13530 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
13532 case X86::BI__builtin_ia32_rdtscp: {
13533 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
13534 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
13535 Ops[0]);
13536 return Builder.CreateExtractValue(Call, 0);
13538 case X86::BI__builtin_ia32_lzcnt_u16:
13539 case X86::BI__builtin_ia32_lzcnt_u32:
13540 case X86::BI__builtin_ia32_lzcnt_u64: {
13541 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
13542 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13544 case X86::BI__builtin_ia32_tzcnt_u16:
13545 case X86::BI__builtin_ia32_tzcnt_u32:
13546 case X86::BI__builtin_ia32_tzcnt_u64: {
13547 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
13548 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
13550 case X86::BI__builtin_ia32_undef128:
13551 case X86::BI__builtin_ia32_undef256:
13552 case X86::BI__builtin_ia32_undef512:
13553 // The x86 definition of "undef" is not the same as the LLVM definition
13554 // (PR32176). We leave optimizing away an unnecessary zero constant to the
13555 // IR optimizer and backend.
13556 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
13557 // value, we should use that here instead of a zero.
13558 return llvm::Constant::getNullValue(ConvertType(E->getType()));
13559 case X86::BI__builtin_ia32_vec_init_v8qi:
13560 case X86::BI__builtin_ia32_vec_init_v4hi:
13561 case X86::BI__builtin_ia32_vec_init_v2si:
13562 return Builder.CreateBitCast(BuildVector(Ops),
13563 llvm::Type::getX86_MMXTy(getLLVMContext()));
13564 case X86::BI__builtin_ia32_vec_ext_v2si:
13565 case X86::BI__builtin_ia32_vec_ext_v16qi:
13566 case X86::BI__builtin_ia32_vec_ext_v8hi:
13567 case X86::BI__builtin_ia32_vec_ext_v4si:
13568 case X86::BI__builtin_ia32_vec_ext_v4sf:
13569 case X86::BI__builtin_ia32_vec_ext_v2di:
13570 case X86::BI__builtin_ia32_vec_ext_v32qi:
13571 case X86::BI__builtin_ia32_vec_ext_v16hi:
13572 case X86::BI__builtin_ia32_vec_ext_v8si:
13573 case X86::BI__builtin_ia32_vec_ext_v4di: {
13574 unsigned NumElts =
13575 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13576 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
13577 Index &= NumElts - 1;
13578 // These builtins exist so we can ensure the index is an ICE and in range.
13579 // Otherwise we could just do this in the header file.
13580 return Builder.CreateExtractElement(Ops[0], Index);
13582 case X86::BI__builtin_ia32_vec_set_v16qi:
13583 case X86::BI__builtin_ia32_vec_set_v8hi:
13584 case X86::BI__builtin_ia32_vec_set_v4si:
13585 case X86::BI__builtin_ia32_vec_set_v2di:
13586 case X86::BI__builtin_ia32_vec_set_v32qi:
13587 case X86::BI__builtin_ia32_vec_set_v16hi:
13588 case X86::BI__builtin_ia32_vec_set_v8si:
13589 case X86::BI__builtin_ia32_vec_set_v4di: {
13590 unsigned NumElts =
13591 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13592 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
13593 Index &= NumElts - 1;
13594 // These builtins exist so we can ensure the index is an ICE and in range.
13595 // Otherwise we could just do this in the header file.
13596 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
13598 case X86::BI_mm_setcsr:
13599 case X86::BI__builtin_ia32_ldmxcsr: {
13600 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
13601 Builder.CreateStore(Ops[0], Tmp);
13602 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
13603 Tmp.getPointer());
13605 case X86::BI_mm_getcsr:
13606 case X86::BI__builtin_ia32_stmxcsr: {
13607 Address Tmp = CreateMemTemp(E->getType());
13608 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
13609 Tmp.getPointer());
13610 return Builder.CreateLoad(Tmp, "stmxcsr");
13612 case X86::BI__builtin_ia32_xsave:
13613 case X86::BI__builtin_ia32_xsave64:
13614 case X86::BI__builtin_ia32_xrstor:
13615 case X86::BI__builtin_ia32_xrstor64:
13616 case X86::BI__builtin_ia32_xsaveopt:
13617 case X86::BI__builtin_ia32_xsaveopt64:
13618 case X86::BI__builtin_ia32_xrstors:
13619 case X86::BI__builtin_ia32_xrstors64:
13620 case X86::BI__builtin_ia32_xsavec:
13621 case X86::BI__builtin_ia32_xsavec64:
13622 case X86::BI__builtin_ia32_xsaves:
13623 case X86::BI__builtin_ia32_xsaves64:
13624 case X86::BI__builtin_ia32_xsetbv:
13625 case X86::BI_xsetbv: {
13626 Intrinsic::ID ID;
13627 #define INTRINSIC_X86_XSAVE_ID(NAME) \
13628 case X86::BI__builtin_ia32_##NAME: \
13629 ID = Intrinsic::x86_##NAME; \
13630 break
13631 switch (BuiltinID) {
13632 default: llvm_unreachable("Unsupported intrinsic!");
13633 INTRINSIC_X86_XSAVE_ID(xsave);
13634 INTRINSIC_X86_XSAVE_ID(xsave64);
13635 INTRINSIC_X86_XSAVE_ID(xrstor);
13636 INTRINSIC_X86_XSAVE_ID(xrstor64);
13637 INTRINSIC_X86_XSAVE_ID(xsaveopt);
13638 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
13639 INTRINSIC_X86_XSAVE_ID(xrstors);
13640 INTRINSIC_X86_XSAVE_ID(xrstors64);
13641 INTRINSIC_X86_XSAVE_ID(xsavec);
13642 INTRINSIC_X86_XSAVE_ID(xsavec64);
13643 INTRINSIC_X86_XSAVE_ID(xsaves);
13644 INTRINSIC_X86_XSAVE_ID(xsaves64);
13645 INTRINSIC_X86_XSAVE_ID(xsetbv);
13646 case X86::BI_xsetbv:
13647 ID = Intrinsic::x86_xsetbv;
13648 break;
13650 #undef INTRINSIC_X86_XSAVE_ID
13651 Value *Mhi = Builder.CreateTrunc(
13652 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
13653 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
13654 Ops[1] = Mhi;
13655 Ops.push_back(Mlo);
13656 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
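// Illustrative sketch (assumption): the 64-bit mask operand is split into its
// two 32-bit halves because the underlying intrinsics take a (hi, lo) pair.
// For example, _xsetbv(0, v) becomes roughly (pseudo-IR)
//   call void @llvm.x86.xsetbv(i32 0, i32 (v >> 32), i32 (v & 0xffffffff))
// and __builtin_ia32_xsave(p, v) becomes
//   call void @llvm.x86.xsave(ptr %p, i32 (v >> 32), i32 (v & 0xffffffff))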
13658 case X86::BI__builtin_ia32_xgetbv:
13659 case X86::BI_xgetbv:
13660 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
13661 case X86::BI__builtin_ia32_storedqudi128_mask:
13662 case X86::BI__builtin_ia32_storedqusi128_mask:
13663 case X86::BI__builtin_ia32_storedquhi128_mask:
13664 case X86::BI__builtin_ia32_storedquqi128_mask:
13665 case X86::BI__builtin_ia32_storeupd128_mask:
13666 case X86::BI__builtin_ia32_storeups128_mask:
13667 case X86::BI__builtin_ia32_storedqudi256_mask:
13668 case X86::BI__builtin_ia32_storedqusi256_mask:
13669 case X86::BI__builtin_ia32_storedquhi256_mask:
13670 case X86::BI__builtin_ia32_storedquqi256_mask:
13671 case X86::BI__builtin_ia32_storeupd256_mask:
13672 case X86::BI__builtin_ia32_storeups256_mask:
13673 case X86::BI__builtin_ia32_storedqudi512_mask:
13674 case X86::BI__builtin_ia32_storedqusi512_mask:
13675 case X86::BI__builtin_ia32_storedquhi512_mask:
13676 case X86::BI__builtin_ia32_storedquqi512_mask:
13677 case X86::BI__builtin_ia32_storeupd512_mask:
13678 case X86::BI__builtin_ia32_storeups512_mask:
13679 return EmitX86MaskedStore(*this, Ops, Align(1));
13681 case X86::BI__builtin_ia32_storesh128_mask:
13682 case X86::BI__builtin_ia32_storess128_mask:
13683 case X86::BI__builtin_ia32_storesd128_mask:
13684 return EmitX86MaskedStore(*this, Ops, Align(1));
13686 case X86::BI__builtin_ia32_vpopcntb_128:
13687 case X86::BI__builtin_ia32_vpopcntd_128:
13688 case X86::BI__builtin_ia32_vpopcntq_128:
13689 case X86::BI__builtin_ia32_vpopcntw_128:
13690 case X86::BI__builtin_ia32_vpopcntb_256:
13691 case X86::BI__builtin_ia32_vpopcntd_256:
13692 case X86::BI__builtin_ia32_vpopcntq_256:
13693 case X86::BI__builtin_ia32_vpopcntw_256:
13694 case X86::BI__builtin_ia32_vpopcntb_512:
13695 case X86::BI__builtin_ia32_vpopcntd_512:
13696 case X86::BI__builtin_ia32_vpopcntq_512:
13697 case X86::BI__builtin_ia32_vpopcntw_512: {
13698 llvm::Type *ResultType = ConvertType(E->getType());
13699 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
13700 return Builder.CreateCall(F, Ops);
13702 case X86::BI__builtin_ia32_cvtmask2b128:
13703 case X86::BI__builtin_ia32_cvtmask2b256:
13704 case X86::BI__builtin_ia32_cvtmask2b512:
13705 case X86::BI__builtin_ia32_cvtmask2w128:
13706 case X86::BI__builtin_ia32_cvtmask2w256:
13707 case X86::BI__builtin_ia32_cvtmask2w512:
13708 case X86::BI__builtin_ia32_cvtmask2d128:
13709 case X86::BI__builtin_ia32_cvtmask2d256:
13710 case X86::BI__builtin_ia32_cvtmask2d512:
13711 case X86::BI__builtin_ia32_cvtmask2q128:
13712 case X86::BI__builtin_ia32_cvtmask2q256:
13713 case X86::BI__builtin_ia32_cvtmask2q512:
13714 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
13716 case X86::BI__builtin_ia32_cvtb2mask128:
13717 case X86::BI__builtin_ia32_cvtb2mask256:
13718 case X86::BI__builtin_ia32_cvtb2mask512:
13719 case X86::BI__builtin_ia32_cvtw2mask128:
13720 case X86::BI__builtin_ia32_cvtw2mask256:
13721 case X86::BI__builtin_ia32_cvtw2mask512:
13722 case X86::BI__builtin_ia32_cvtd2mask128:
13723 case X86::BI__builtin_ia32_cvtd2mask256:
13724 case X86::BI__builtin_ia32_cvtd2mask512:
13725 case X86::BI__builtin_ia32_cvtq2mask128:
13726 case X86::BI__builtin_ia32_cvtq2mask256:
13727 case X86::BI__builtin_ia32_cvtq2mask512:
13728 return EmitX86ConvertToMask(*this, Ops[0]);
13730 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
13731 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
13732 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
13733 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
13734 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
13735 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
13736 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
13737 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
13738 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
13739 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
13740 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
13741 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
13742 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
13743 return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
13745 case X86::BI__builtin_ia32_vfmaddss3:
13746 case X86::BI__builtin_ia32_vfmaddsd3:
13747 case X86::BI__builtin_ia32_vfmaddsh3_mask:
13748 case X86::BI__builtin_ia32_vfmaddss3_mask:
13749 case X86::BI__builtin_ia32_vfmaddsd3_mask:
13750 return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
13751 case X86::BI__builtin_ia32_vfmaddss:
13752 case X86::BI__builtin_ia32_vfmaddsd:
13753 return EmitScalarFMAExpr(*this, E, Ops,
13754 Constant::getNullValue(Ops[0]->getType()));
13755 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
13756 case X86::BI__builtin_ia32_vfmaddss3_maskz:
13757 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
13758 return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
13759 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
13760 case X86::BI__builtin_ia32_vfmaddss3_mask3:
13761 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
13762 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
13763 case X86::BI__builtin_ia32_vfmsubsh3_mask3:
13764 case X86::BI__builtin_ia32_vfmsubss3_mask3:
13765 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
13766 return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
13767 /*NegAcc*/ true);
13768 case X86::BI__builtin_ia32_vfmaddph:
13769 case X86::BI__builtin_ia32_vfmaddps:
13770 case X86::BI__builtin_ia32_vfmaddpd:
13771 case X86::BI__builtin_ia32_vfmaddph256:
13772 case X86::BI__builtin_ia32_vfmaddps256:
13773 case X86::BI__builtin_ia32_vfmaddpd256:
13774 case X86::BI__builtin_ia32_vfmaddph512_mask:
13775 case X86::BI__builtin_ia32_vfmaddph512_maskz:
13776 case X86::BI__builtin_ia32_vfmaddph512_mask3:
13777 case X86::BI__builtin_ia32_vfmaddps512_mask:
13778 case X86::BI__builtin_ia32_vfmaddps512_maskz:
13779 case X86::BI__builtin_ia32_vfmaddps512_mask3:
13780 case X86::BI__builtin_ia32_vfmsubps512_mask3:
13781 case X86::BI__builtin_ia32_vfmaddpd512_mask:
13782 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
13783 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
13784 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
13785 case X86::BI__builtin_ia32_vfmsubph512_mask3:
13786 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
13787 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
13788 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
13789 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
13790 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
13791 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
13792 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
13793 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
13794 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
13795 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
13796 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
13797 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
13798 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
13799 return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
13801 case X86::BI__builtin_ia32_movdqa32store128_mask:
13802 case X86::BI__builtin_ia32_movdqa64store128_mask:
13803 case X86::BI__builtin_ia32_storeaps128_mask:
13804 case X86::BI__builtin_ia32_storeapd128_mask:
13805 case X86::BI__builtin_ia32_movdqa32store256_mask:
13806 case X86::BI__builtin_ia32_movdqa64store256_mask:
13807 case X86::BI__builtin_ia32_storeaps256_mask:
13808 case X86::BI__builtin_ia32_storeapd256_mask:
13809 case X86::BI__builtin_ia32_movdqa32store512_mask:
13810 case X86::BI__builtin_ia32_movdqa64store512_mask:
13811 case X86::BI__builtin_ia32_storeaps512_mask:
13812 case X86::BI__builtin_ia32_storeapd512_mask:
13813 return EmitX86MaskedStore(
13814 *this, Ops,
13815 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
13817 case X86::BI__builtin_ia32_loadups128_mask:
13818 case X86::BI__builtin_ia32_loadups256_mask:
13819 case X86::BI__builtin_ia32_loadups512_mask:
13820 case X86::BI__builtin_ia32_loadupd128_mask:
13821 case X86::BI__builtin_ia32_loadupd256_mask:
13822 case X86::BI__builtin_ia32_loadupd512_mask:
13823 case X86::BI__builtin_ia32_loaddquqi128_mask:
13824 case X86::BI__builtin_ia32_loaddquqi256_mask:
13825 case X86::BI__builtin_ia32_loaddquqi512_mask:
13826 case X86::BI__builtin_ia32_loaddquhi128_mask:
13827 case X86::BI__builtin_ia32_loaddquhi256_mask:
13828 case X86::BI__builtin_ia32_loaddquhi512_mask:
13829 case X86::BI__builtin_ia32_loaddqusi128_mask:
13830 case X86::BI__builtin_ia32_loaddqusi256_mask:
13831 case X86::BI__builtin_ia32_loaddqusi512_mask:
13832 case X86::BI__builtin_ia32_loaddqudi128_mask:
13833 case X86::BI__builtin_ia32_loaddqudi256_mask:
13834 case X86::BI__builtin_ia32_loaddqudi512_mask:
13835 return EmitX86MaskedLoad(*this, Ops, Align(1));
13837 case X86::BI__builtin_ia32_loadsh128_mask:
13838 case X86::BI__builtin_ia32_loadss128_mask:
13839 case X86::BI__builtin_ia32_loadsd128_mask:
13840 return EmitX86MaskedLoad(*this, Ops, Align(1));
13842 case X86::BI__builtin_ia32_loadaps128_mask:
13843 case X86::BI__builtin_ia32_loadaps256_mask:
13844 case X86::BI__builtin_ia32_loadaps512_mask:
13845 case X86::BI__builtin_ia32_loadapd128_mask:
13846 case X86::BI__builtin_ia32_loadapd256_mask:
13847 case X86::BI__builtin_ia32_loadapd512_mask:
13848 case X86::BI__builtin_ia32_movdqa32load128_mask:
13849 case X86::BI__builtin_ia32_movdqa32load256_mask:
13850 case X86::BI__builtin_ia32_movdqa32load512_mask:
13851 case X86::BI__builtin_ia32_movdqa64load128_mask:
13852 case X86::BI__builtin_ia32_movdqa64load256_mask:
13853 case X86::BI__builtin_ia32_movdqa64load512_mask:
13854 return EmitX86MaskedLoad(
13855 *this, Ops,
13856 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
13858 case X86::BI__builtin_ia32_expandloaddf128_mask:
13859 case X86::BI__builtin_ia32_expandloaddf256_mask:
13860 case X86::BI__builtin_ia32_expandloaddf512_mask:
13861 case X86::BI__builtin_ia32_expandloadsf128_mask:
13862 case X86::BI__builtin_ia32_expandloadsf256_mask:
13863 case X86::BI__builtin_ia32_expandloadsf512_mask:
13864 case X86::BI__builtin_ia32_expandloaddi128_mask:
13865 case X86::BI__builtin_ia32_expandloaddi256_mask:
13866 case X86::BI__builtin_ia32_expandloaddi512_mask:
13867 case X86::BI__builtin_ia32_expandloadsi128_mask:
13868 case X86::BI__builtin_ia32_expandloadsi256_mask:
13869 case X86::BI__builtin_ia32_expandloadsi512_mask:
13870 case X86::BI__builtin_ia32_expandloadhi128_mask:
13871 case X86::BI__builtin_ia32_expandloadhi256_mask:
13872 case X86::BI__builtin_ia32_expandloadhi512_mask:
13873 case X86::BI__builtin_ia32_expandloadqi128_mask:
13874 case X86::BI__builtin_ia32_expandloadqi256_mask:
13875 case X86::BI__builtin_ia32_expandloadqi512_mask:
13876 return EmitX86ExpandLoad(*this, Ops);
13878 case X86::BI__builtin_ia32_compressstoredf128_mask:
13879 case X86::BI__builtin_ia32_compressstoredf256_mask:
13880 case X86::BI__builtin_ia32_compressstoredf512_mask:
13881 case X86::BI__builtin_ia32_compressstoresf128_mask:
13882 case X86::BI__builtin_ia32_compressstoresf256_mask:
13883 case X86::BI__builtin_ia32_compressstoresf512_mask:
13884 case X86::BI__builtin_ia32_compressstoredi128_mask:
13885 case X86::BI__builtin_ia32_compressstoredi256_mask:
13886 case X86::BI__builtin_ia32_compressstoredi512_mask:
13887 case X86::BI__builtin_ia32_compressstoresi128_mask:
13888 case X86::BI__builtin_ia32_compressstoresi256_mask:
13889 case X86::BI__builtin_ia32_compressstoresi512_mask:
13890 case X86::BI__builtin_ia32_compressstorehi128_mask:
13891 case X86::BI__builtin_ia32_compressstorehi256_mask:
13892 case X86::BI__builtin_ia32_compressstorehi512_mask:
13893 case X86::BI__builtin_ia32_compressstoreqi128_mask:
13894 case X86::BI__builtin_ia32_compressstoreqi256_mask:
13895 case X86::BI__builtin_ia32_compressstoreqi512_mask:
13896 return EmitX86CompressStore(*this, Ops);
13898 case X86::BI__builtin_ia32_expanddf128_mask:
13899 case X86::BI__builtin_ia32_expanddf256_mask:
13900 case X86::BI__builtin_ia32_expanddf512_mask:
13901 case X86::BI__builtin_ia32_expandsf128_mask:
13902 case X86::BI__builtin_ia32_expandsf256_mask:
13903 case X86::BI__builtin_ia32_expandsf512_mask:
13904 case X86::BI__builtin_ia32_expanddi128_mask:
13905 case X86::BI__builtin_ia32_expanddi256_mask:
13906 case X86::BI__builtin_ia32_expanddi512_mask:
13907 case X86::BI__builtin_ia32_expandsi128_mask:
13908 case X86::BI__builtin_ia32_expandsi256_mask:
13909 case X86::BI__builtin_ia32_expandsi512_mask:
13910 case X86::BI__builtin_ia32_expandhi128_mask:
13911 case X86::BI__builtin_ia32_expandhi256_mask:
13912 case X86::BI__builtin_ia32_expandhi512_mask:
13913 case X86::BI__builtin_ia32_expandqi128_mask:
13914 case X86::BI__builtin_ia32_expandqi256_mask:
13915 case X86::BI__builtin_ia32_expandqi512_mask:
13916 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
13918 case X86::BI__builtin_ia32_compressdf128_mask:
13919 case X86::BI__builtin_ia32_compressdf256_mask:
13920 case X86::BI__builtin_ia32_compressdf512_mask:
13921 case X86::BI__builtin_ia32_compresssf128_mask:
13922 case X86::BI__builtin_ia32_compresssf256_mask:
13923 case X86::BI__builtin_ia32_compresssf512_mask:
13924 case X86::BI__builtin_ia32_compressdi128_mask:
13925 case X86::BI__builtin_ia32_compressdi256_mask:
13926 case X86::BI__builtin_ia32_compressdi512_mask:
13927 case X86::BI__builtin_ia32_compresssi128_mask:
13928 case X86::BI__builtin_ia32_compresssi256_mask:
13929 case X86::BI__builtin_ia32_compresssi512_mask:
13930 case X86::BI__builtin_ia32_compresshi128_mask:
13931 case X86::BI__builtin_ia32_compresshi256_mask:
13932 case X86::BI__builtin_ia32_compresshi512_mask:
13933 case X86::BI__builtin_ia32_compressqi128_mask:
13934 case X86::BI__builtin_ia32_compressqi256_mask:
13935 case X86::BI__builtin_ia32_compressqi512_mask:
13936 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
13938 case X86::BI__builtin_ia32_gather3div2df:
13939 case X86::BI__builtin_ia32_gather3div2di:
13940 case X86::BI__builtin_ia32_gather3div4df:
13941 case X86::BI__builtin_ia32_gather3div4di:
13942 case X86::BI__builtin_ia32_gather3div4sf:
13943 case X86::BI__builtin_ia32_gather3div4si:
13944 case X86::BI__builtin_ia32_gather3div8sf:
13945 case X86::BI__builtin_ia32_gather3div8si:
13946 case X86::BI__builtin_ia32_gather3siv2df:
13947 case X86::BI__builtin_ia32_gather3siv2di:
13948 case X86::BI__builtin_ia32_gather3siv4df:
13949 case X86::BI__builtin_ia32_gather3siv4di:
13950 case X86::BI__builtin_ia32_gather3siv4sf:
13951 case X86::BI__builtin_ia32_gather3siv4si:
13952 case X86::BI__builtin_ia32_gather3siv8sf:
13953 case X86::BI__builtin_ia32_gather3siv8si:
13954 case X86::BI__builtin_ia32_gathersiv8df:
13955 case X86::BI__builtin_ia32_gathersiv16sf:
13956 case X86::BI__builtin_ia32_gatherdiv8df:
13957 case X86::BI__builtin_ia32_gatherdiv16sf:
13958 case X86::BI__builtin_ia32_gathersiv8di:
13959 case X86::BI__builtin_ia32_gathersiv16si:
13960 case X86::BI__builtin_ia32_gatherdiv8di:
13961 case X86::BI__builtin_ia32_gatherdiv16si: {
13962 Intrinsic::ID IID;
13963 switch (BuiltinID) {
13964 default: llvm_unreachable("Unexpected builtin");
13965 case X86::BI__builtin_ia32_gather3div2df:
13966 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
13967 break;
13968 case X86::BI__builtin_ia32_gather3div2di:
13969 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
13970 break;
13971 case X86::BI__builtin_ia32_gather3div4df:
13972 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
13973 break;
13974 case X86::BI__builtin_ia32_gather3div4di:
13975 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
13976 break;
13977 case X86::BI__builtin_ia32_gather3div4sf:
13978 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
13979 break;
13980 case X86::BI__builtin_ia32_gather3div4si:
13981 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
13982 break;
13983 case X86::BI__builtin_ia32_gather3div8sf:
13984 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
13985 break;
13986 case X86::BI__builtin_ia32_gather3div8si:
13987 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
13988 break;
13989 case X86::BI__builtin_ia32_gather3siv2df:
13990 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
13991 break;
13992 case X86::BI__builtin_ia32_gather3siv2di:
13993 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
13994 break;
13995 case X86::BI__builtin_ia32_gather3siv4df:
13996 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
13997 break;
13998 case X86::BI__builtin_ia32_gather3siv4di:
13999 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
14000 break;
14001 case X86::BI__builtin_ia32_gather3siv4sf:
14002 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
14003 break;
14004 case X86::BI__builtin_ia32_gather3siv4si:
14005 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
14006 break;
14007 case X86::BI__builtin_ia32_gather3siv8sf:
14008 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
14009 break;
14010 case X86::BI__builtin_ia32_gather3siv8si:
14011 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
14012 break;
14013 case X86::BI__builtin_ia32_gathersiv8df:
14014 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
14015 break;
14016 case X86::BI__builtin_ia32_gathersiv16sf:
14017 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
14018 break;
14019 case X86::BI__builtin_ia32_gatherdiv8df:
14020 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
14021 break;
14022 case X86::BI__builtin_ia32_gatherdiv16sf:
14023 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
14024 break;
14025 case X86::BI__builtin_ia32_gathersiv8di:
14026 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
14027 break;
14028 case X86::BI__builtin_ia32_gathersiv16si:
14029 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
14030 break;
14031 case X86::BI__builtin_ia32_gatherdiv8di:
14032 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
14033 break;
14034 case X86::BI__builtin_ia32_gatherdiv16si:
14035 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
14036 break;
14039 unsigned MinElts = std::min(
14040 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
14041 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
14042 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
14043 Function *Intr = CGM.getIntrinsic(IID);
14044 return Builder.CreateCall(Intr, Ops);
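// Note (illustrative, not upstream): the mask operand must carry one bit per
// gathered element, and that count is the smaller of the pass-through vector
// and the index vector widths. For example, gather3siv2df (backing the 128-bit
// masked i32-index gather of doubles) has a <2 x double> pass-through but a
// <4 x i32> index vector, so getMaskVecValue() narrows the i8 mask to
// <2 x i1> before the call.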
14047 case X86::BI__builtin_ia32_scattersiv8df:
14048 case X86::BI__builtin_ia32_scattersiv16sf:
14049 case X86::BI__builtin_ia32_scatterdiv8df:
14050 case X86::BI__builtin_ia32_scatterdiv16sf:
14051 case X86::BI__builtin_ia32_scattersiv8di:
14052 case X86::BI__builtin_ia32_scattersiv16si:
14053 case X86::BI__builtin_ia32_scatterdiv8di:
14054 case X86::BI__builtin_ia32_scatterdiv16si:
14055 case X86::BI__builtin_ia32_scatterdiv2df:
14056 case X86::BI__builtin_ia32_scatterdiv2di:
14057 case X86::BI__builtin_ia32_scatterdiv4df:
14058 case X86::BI__builtin_ia32_scatterdiv4di:
14059 case X86::BI__builtin_ia32_scatterdiv4sf:
14060 case X86::BI__builtin_ia32_scatterdiv4si:
14061 case X86::BI__builtin_ia32_scatterdiv8sf:
14062 case X86::BI__builtin_ia32_scatterdiv8si:
14063 case X86::BI__builtin_ia32_scattersiv2df:
14064 case X86::BI__builtin_ia32_scattersiv2di:
14065 case X86::BI__builtin_ia32_scattersiv4df:
14066 case X86::BI__builtin_ia32_scattersiv4di:
14067 case X86::BI__builtin_ia32_scattersiv4sf:
14068 case X86::BI__builtin_ia32_scattersiv4si:
14069 case X86::BI__builtin_ia32_scattersiv8sf:
14070 case X86::BI__builtin_ia32_scattersiv8si: {
14071 Intrinsic::ID IID;
14072 switch (BuiltinID) {
14073 default: llvm_unreachable("Unexpected builtin");
14074 case X86::BI__builtin_ia32_scattersiv8df:
14075 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
14076 break;
14077 case X86::BI__builtin_ia32_scattersiv16sf:
14078 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
14079 break;
14080 case X86::BI__builtin_ia32_scatterdiv8df:
14081 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
14082 break;
14083 case X86::BI__builtin_ia32_scatterdiv16sf:
14084 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
14085 break;
14086 case X86::BI__builtin_ia32_scattersiv8di:
14087 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
14088 break;
14089 case X86::BI__builtin_ia32_scattersiv16si:
14090 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
14091 break;
14092 case X86::BI__builtin_ia32_scatterdiv8di:
14093 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
14094 break;
14095 case X86::BI__builtin_ia32_scatterdiv16si:
14096 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
14097 break;
14098 case X86::BI__builtin_ia32_scatterdiv2df:
14099 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
14100 break;
14101 case X86::BI__builtin_ia32_scatterdiv2di:
14102 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
14103 break;
14104 case X86::BI__builtin_ia32_scatterdiv4df:
14105 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
14106 break;
14107 case X86::BI__builtin_ia32_scatterdiv4di:
14108 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
14109 break;
14110 case X86::BI__builtin_ia32_scatterdiv4sf:
14111 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
14112 break;
14113 case X86::BI__builtin_ia32_scatterdiv4si:
14114 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
14115 break;
14116 case X86::BI__builtin_ia32_scatterdiv8sf:
14117 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
14118 break;
14119 case X86::BI__builtin_ia32_scatterdiv8si:
14120 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
14121 break;
14122 case X86::BI__builtin_ia32_scattersiv2df:
14123 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
14124 break;
14125 case X86::BI__builtin_ia32_scattersiv2di:
14126 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
14127 break;
14128 case X86::BI__builtin_ia32_scattersiv4df:
14129 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
14130 break;
14131 case X86::BI__builtin_ia32_scattersiv4di:
14132 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
14133 break;
14134 case X86::BI__builtin_ia32_scattersiv4sf:
14135 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
14136 break;
14137 case X86::BI__builtin_ia32_scattersiv4si:
14138 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
14139 break;
14140 case X86::BI__builtin_ia32_scattersiv8sf:
14141 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
14142 break;
14143 case X86::BI__builtin_ia32_scattersiv8si:
14144 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
14145 break;
14148 unsigned MinElts = std::min(
14149 cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
14150 cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
14151 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
14152 Function *Intr = CGM.getIntrinsic(IID);
14153 return Builder.CreateCall(Intr, Ops);
14156 case X86::BI__builtin_ia32_vextractf128_pd256:
14157 case X86::BI__builtin_ia32_vextractf128_ps256:
14158 case X86::BI__builtin_ia32_vextractf128_si256:
14159 case X86::BI__builtin_ia32_extract128i256:
14160 case X86::BI__builtin_ia32_extractf64x4_mask:
14161 case X86::BI__builtin_ia32_extractf32x4_mask:
14162 case X86::BI__builtin_ia32_extracti64x4_mask:
14163 case X86::BI__builtin_ia32_extracti32x4_mask:
14164 case X86::BI__builtin_ia32_extractf32x8_mask:
14165 case X86::BI__builtin_ia32_extracti32x8_mask:
14166 case X86::BI__builtin_ia32_extractf32x4_256_mask:
14167 case X86::BI__builtin_ia32_extracti32x4_256_mask:
14168 case X86::BI__builtin_ia32_extractf64x2_256_mask:
14169 case X86::BI__builtin_ia32_extracti64x2_256_mask:
14170 case X86::BI__builtin_ia32_extractf64x2_512_mask:
14171 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
14172 auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
14173 unsigned NumElts = DstTy->getNumElements();
14174 unsigned SrcNumElts =
14175 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14176 unsigned SubVectors = SrcNumElts / NumElts;
14177 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
14178 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
14179 Index &= SubVectors - 1; // Remove any extra bits.
14180 Index *= NumElts;
14182 int Indices[16];
14183 for (unsigned i = 0; i != NumElts; ++i)
14184 Indices[i] = i + Index;
14186 Value *Res = Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14187 "extract");
14189 if (Ops.size() == 4)
14190 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
14192 return Res;
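// Illustrative sketch (assumption): _mm512_extractf64x4_pd(v, 1) maps to the
// extractf64x4 builtin with an all-ones mask, so the code above emits roughly
//   %e = shufflevector <8 x double> %v, <8 x double> poison,
//                      <4 x i32> <i32 4, i32 5, i32 6, i32 7>
// and, for the masked forms, a select between %e and the pass-through operand.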
14194 case X86::BI__builtin_ia32_vinsertf128_pd256:
14195 case X86::BI__builtin_ia32_vinsertf128_ps256:
14196 case X86::BI__builtin_ia32_vinsertf128_si256:
14197 case X86::BI__builtin_ia32_insert128i256:
14198 case X86::BI__builtin_ia32_insertf64x4:
14199 case X86::BI__builtin_ia32_insertf32x4:
14200 case X86::BI__builtin_ia32_inserti64x4:
14201 case X86::BI__builtin_ia32_inserti32x4:
14202 case X86::BI__builtin_ia32_insertf32x8:
14203 case X86::BI__builtin_ia32_inserti32x8:
14204 case X86::BI__builtin_ia32_insertf32x4_256:
14205 case X86::BI__builtin_ia32_inserti32x4_256:
14206 case X86::BI__builtin_ia32_insertf64x2_256:
14207 case X86::BI__builtin_ia32_inserti64x2_256:
14208 case X86::BI__builtin_ia32_insertf64x2_512:
14209 case X86::BI__builtin_ia32_inserti64x2_512: {
14210 unsigned DstNumElts =
14211 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14212 unsigned SrcNumElts =
14213 cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
14214 unsigned SubVectors = DstNumElts / SrcNumElts;
14215 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
14216 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
14217 Index &= SubVectors - 1; // Remove any extra bits.
14218 Index *= SrcNumElts;
14220 int Indices[16];
14221 for (unsigned i = 0; i != DstNumElts; ++i)
14222 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
14224 Value *Op1 = Builder.CreateShuffleVector(
14225 Ops[1], ArrayRef(Indices, DstNumElts), "widen");
14227 for (unsigned i = 0; i != DstNumElts; ++i) {
14228 if (i >= Index && i < (Index + SrcNumElts))
14229 Indices[i] = (i - Index) + DstNumElts;
14230 else
14231 Indices[i] = i;
14234 return Builder.CreateShuffleVector(Ops[0], Op1,
14235 ArrayRef(Indices, DstNumElts), "insert");
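// Illustrative sketch (assumption): _mm256_insertf128_ps(a, b, 1) first widens
// b with a single-input shuffle (the extra lanes are poison and never read),
// then blends it into a:
//   %w = shufflevector <4 x float> %b, <4 x float> poison,
//                      <8 x i32> <i32 0, i32 1, i32 2, i32 3,
//                                 i32 4, i32 5, i32 6, i32 7>
//   %r = shufflevector <8 x float> %a, <8 x float> %w,
//                      <8 x i32> <i32 0, i32 1, i32 2, i32 3,
//                                 i32 8, i32 9, i32 10, i32 11>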
14237 case X86::BI__builtin_ia32_pmovqd512_mask:
14238 case X86::BI__builtin_ia32_pmovwb512_mask: {
14239 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
14240 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14242 case X86::BI__builtin_ia32_pmovdb512_mask:
14243 case X86::BI__builtin_ia32_pmovdw512_mask:
14244 case X86::BI__builtin_ia32_pmovqw512_mask: {
14245 if (const auto *C = dyn_cast<Constant>(Ops[2]))
14246 if (C->isAllOnesValue())
14247 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
14249 Intrinsic::ID IID;
14250 switch (BuiltinID) {
14251 default: llvm_unreachable("Unsupported intrinsic!");
14252 case X86::BI__builtin_ia32_pmovdb512_mask:
14253 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
14254 break;
14255 case X86::BI__builtin_ia32_pmovdw512_mask:
14256 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
14257 break;
14258 case X86::BI__builtin_ia32_pmovqw512_mask:
14259 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
14260 break;
14263 Function *Intr = CGM.getIntrinsic(IID);
14264 return Builder.CreateCall(Intr, Ops);
14266 case X86::BI__builtin_ia32_pblendw128:
14267 case X86::BI__builtin_ia32_blendpd:
14268 case X86::BI__builtin_ia32_blendps:
14269 case X86::BI__builtin_ia32_blendpd256:
14270 case X86::BI__builtin_ia32_blendps256:
14271 case X86::BI__builtin_ia32_pblendw256:
14272 case X86::BI__builtin_ia32_pblendd128:
14273 case X86::BI__builtin_ia32_pblendd256: {
14274 unsigned NumElts =
14275 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14276 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14278 int Indices[16];
14279 // If there are more than 8 elements, the 8-bit immediate is reused for the
14280 // upper elements, so make sure we handle that wrap-around.
14281 for (unsigned i = 0; i != NumElts; ++i)
14282 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
14284 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14285 ArrayRef(Indices, NumElts), "blend");
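// Illustrative sketch (assumption): _mm_blend_ps(a, b, 0x5) has Imm = 0b0101,
// so bits 0 and 2 select from the second source and the shuffle becomes
//   %r = shufflevector <4 x float> %a, <4 x float> %b,
//                      <4 x i32> <i32 4, i32 1, i32 6, i32 3>
// For 16-element types the same 8-bit immediate is applied to both halves.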
14287 case X86::BI__builtin_ia32_pshuflw:
14288 case X86::BI__builtin_ia32_pshuflw256:
14289 case X86::BI__builtin_ia32_pshuflw512: {
14290 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14291 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14292 unsigned NumElts = Ty->getNumElements();
14294 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
14295 Imm = (Imm & 0xff) * 0x01010101;
14297 int Indices[32];
14298 for (unsigned l = 0; l != NumElts; l += 8) {
14299 for (unsigned i = 0; i != 4; ++i) {
14300 Indices[l + i] = l + (Imm & 3);
14301 Imm >>= 2;
14303 for (unsigned i = 4; i != 8; ++i)
14304 Indices[l + i] = l + i;
14307 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14308 "pshuflw");
14310 case X86::BI__builtin_ia32_pshufhw:
14311 case X86::BI__builtin_ia32_pshufhw256:
14312 case X86::BI__builtin_ia32_pshufhw512: {
14313 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14314 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14315 unsigned NumElts = Ty->getNumElements();
14317 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
14318 Imm = (Imm & 0xff) * 0x01010101;
14320 int Indices[32];
14321 for (unsigned l = 0; l != NumElts; l += 8) {
14322 for (unsigned i = 0; i != 4; ++i)
14323 Indices[l + i] = l + i;
14324 for (unsigned i = 4; i != 8; ++i) {
14325 Indices[l + i] = l + 4 + (Imm & 3);
14326 Imm >>= 2;
14330 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14331 "pshufhw");
14333 case X86::BI__builtin_ia32_pshufd:
14334 case X86::BI__builtin_ia32_pshufd256:
14335 case X86::BI__builtin_ia32_pshufd512:
14336 case X86::BI__builtin_ia32_vpermilpd:
14337 case X86::BI__builtin_ia32_vpermilps:
14338 case X86::BI__builtin_ia32_vpermilpd256:
14339 case X86::BI__builtin_ia32_vpermilps256:
14340 case X86::BI__builtin_ia32_vpermilpd512:
14341 case X86::BI__builtin_ia32_vpermilps512: {
14342 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14343 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14344 unsigned NumElts = Ty->getNumElements();
14345 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
14346 unsigned NumLaneElts = NumElts / NumLanes;
14348 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
14349 Imm = (Imm & 0xff) * 0x01010101;
14351 int Indices[16];
14352 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14353 for (unsigned i = 0; i != NumLaneElts; ++i) {
14354 Indices[i + l] = (Imm % NumLaneElts) + l;
14355 Imm /= NumLaneElts;
14359 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14360 "permil");
14362 case X86::BI__builtin_ia32_shufpd:
14363 case X86::BI__builtin_ia32_shufpd256:
14364 case X86::BI__builtin_ia32_shufpd512:
14365 case X86::BI__builtin_ia32_shufps:
14366 case X86::BI__builtin_ia32_shufps256:
14367 case X86::BI__builtin_ia32_shufps512: {
14368 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14369 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14370 unsigned NumElts = Ty->getNumElements();
14371 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
14372 unsigned NumLaneElts = NumElts / NumLanes;
14374 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
14375 Imm = (Imm & 0xff) * 0x01010101;
14377 int Indices[16];
14378 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14379 for (unsigned i = 0; i != NumLaneElts; ++i) {
14380 unsigned Index = Imm % NumLaneElts;
14381 Imm /= NumLaneElts;
14382 if (i >= (NumLaneElts / 2))
14383 Index += NumElts;
14384 Indices[l + i] = l + Index;
14388 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14389 ArrayRef(Indices, NumElts), "shufp");
14391 case X86::BI__builtin_ia32_permdi256:
14392 case X86::BI__builtin_ia32_permdf256:
14393 case X86::BI__builtin_ia32_permdi512:
14394 case X86::BI__builtin_ia32_permdf512: {
14395 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14396 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14397 unsigned NumElts = Ty->getNumElements();
14399 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
14400 int Indices[8];
14401 for (unsigned l = 0; l != NumElts; l += 4)
14402 for (unsigned i = 0; i != 4; ++i)
14403 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
14405 return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
14406 "perm");
14408 case X86::BI__builtin_ia32_palignr128:
14409 case X86::BI__builtin_ia32_palignr256:
14410 case X86::BI__builtin_ia32_palignr512: {
14411 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
14413 unsigned NumElts =
14414 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14415 assert(NumElts % 16 == 0);
14417 // If palignr is shifting the pair of vectors more than the size of two
14418 // lanes, emit zero.
14419 if (ShiftVal >= 32)
14420 return llvm::Constant::getNullValue(ConvertType(E->getType()));
14422 // If palignr is shifting the pair of input vectors more than one lane,
14423 // but less than two lanes, convert to shifting in zeroes.
14424 if (ShiftVal > 16) {
14425 ShiftVal -= 16;
14426 Ops[1] = Ops[0];
14427 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
14430 int Indices[64];
14431 // 256/512-bit palignr operates on 128-bit lanes, so handle each lane separately.
14432 for (unsigned l = 0; l != NumElts; l += 16) {
14433 for (unsigned i = 0; i != 16; ++i) {
14434 unsigned Idx = ShiftVal + i;
14435 if (Idx >= 16)
14436 Idx += NumElts - 16; // End of lane, switch operand.
14437 Indices[l + i] = Idx + l;
14441 return Builder.CreateShuffleVector(Ops[1], Ops[0],
14442 ArrayRef(Indices, NumElts), "palignr");
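// Illustrative sketch (assumption): _mm_alignr_epi8(a, b, 4) concatenates a:b
// and shifts right by 4 bytes, which the code above expresses as
//   %r = shufflevector <16 x i8> %b, <16 x i8> %a,
//                      <16 x i32> <i32 4, ..., i32 15, i32 16, i32 17, i32 18, i32 19>
// i.e. bytes 4..15 of b followed by bytes 0..3 of a. For the 256/512-bit forms
// the same pattern is repeated per 128-bit lane.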
14444 case X86::BI__builtin_ia32_alignd128:
14445 case X86::BI__builtin_ia32_alignd256:
14446 case X86::BI__builtin_ia32_alignd512:
14447 case X86::BI__builtin_ia32_alignq128:
14448 case X86::BI__builtin_ia32_alignq256:
14449 case X86::BI__builtin_ia32_alignq512: {
14450 unsigned NumElts =
14451 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14452 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
14454 // Mask the shift amount to width of a vector.
14455 ShiftVal &= NumElts - 1;
14457 int Indices[16];
14458 for (unsigned i = 0; i != NumElts; ++i)
14459 Indices[i] = i + ShiftVal;
14461 return Builder.CreateShuffleVector(Ops[1], Ops[0],
14462 ArrayRef(Indices, NumElts), "valign");
14464 case X86::BI__builtin_ia32_shuf_f32x4_256:
14465 case X86::BI__builtin_ia32_shuf_f64x2_256:
14466 case X86::BI__builtin_ia32_shuf_i32x4_256:
14467 case X86::BI__builtin_ia32_shuf_i64x2_256:
14468 case X86::BI__builtin_ia32_shuf_f32x4:
14469 case X86::BI__builtin_ia32_shuf_f64x2:
14470 case X86::BI__builtin_ia32_shuf_i32x4:
14471 case X86::BI__builtin_ia32_shuf_i64x2: {
14472 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14473 auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
14474 unsigned NumElts = Ty->getNumElements();
14475 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
14476 unsigned NumLaneElts = NumElts / NumLanes;
14478 int Indices[16];
14479 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
14480 unsigned Index = (Imm % NumLanes) * NumLaneElts;
14481 Imm /= NumLanes; // Discard the bits we just used.
14482 if (l >= (NumElts / 2))
14483 Index += NumElts; // Switch to other source.
14484 for (unsigned i = 0; i != NumLaneElts; ++i) {
14485 Indices[l + i] = Index + i;
14489 return Builder.CreateShuffleVector(Ops[0], Ops[1],
14490 ArrayRef(Indices, NumElts), "shuf");
14493 case X86::BI__builtin_ia32_vperm2f128_pd256:
14494 case X86::BI__builtin_ia32_vperm2f128_ps256:
14495 case X86::BI__builtin_ia32_vperm2f128_si256:
14496 case X86::BI__builtin_ia32_permti256: {
14497 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
14498 unsigned NumElts =
14499 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14501 // This takes a very simple approach: there are only two lanes and a shuffle
14502 // can take two inputs, so reserve the first input for the first lane and the
14503 // second input for the second lane. This may result in duplicate sources,
14504 // but the backend can deal with that.
14506 Value *OutOps[2];
14507 int Indices[8];
14508 for (unsigned l = 0; l != 2; ++l) {
14509 // Determine the source for this lane.
14510 if (Imm & (1 << ((l * 4) + 3)))
14511 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
14512 else if (Imm & (1 << ((l * 4) + 1)))
14513 OutOps[l] = Ops[1];
14514 else
14515 OutOps[l] = Ops[0];
14517 for (unsigned i = 0; i != NumElts/2; ++i) {
14518 // Start with ith element of the source for this lane.
14519 unsigned Idx = (l * NumElts) + i;
14520 // If bit 0 of the immediate half is set, switch to the high half of
14521 // the source.
14522 if (Imm & (1 << (l * 4)))
14523 Idx += NumElts/2;
14524 Indices[(l * (NumElts/2)) + i] = Idx;
14528 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
14529 ArrayRef(Indices, NumElts), "vperm");
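// Illustrative sketch (assumption): _mm256_permute2f128_ps(a, b, 0x31) selects
// the high half of a for the low result lane (low nibble 0x1) and the high
// half of b for the high result lane (high nibble 0x3), so OutOps becomes
// {a, b} and the shuffle mask is <4, 5, 6, 7, 12, 13, 14, 15>.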
14532 case X86::BI__builtin_ia32_pslldqi128_byteshift:
14533 case X86::BI__builtin_ia32_pslldqi256_byteshift:
14534 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
14535 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14536 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
14537 // Builtin type is vXi64 so multiply by 8 to get bytes.
14538 unsigned NumElts = ResultType->getNumElements() * 8;
14540 // If pslldq is shifting the vector more than 15 bytes, emit zero.
14541 if (ShiftVal >= 16)
14542 return llvm::Constant::getNullValue(ResultType);
14544 int Indices[64];
14545 // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane separately.
14546 for (unsigned l = 0; l != NumElts; l += 16) {
14547 for (unsigned i = 0; i != 16; ++i) {
14548 unsigned Idx = NumElts + i - ShiftVal;
14549 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
14550 Indices[l + i] = Idx + l;
14554 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
14555 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
14556 Value *Zero = llvm::Constant::getNullValue(VecTy);
14557 Value *SV = Builder.CreateShuffleVector(
14558 Zero, Cast, ArrayRef(Indices, NumElts), "pslldq");
14559 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
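// Illustrative sketch (assumption): _mm_slli_si128(a, 4) bitcasts a to
// <16 x i8> and shuffles it against a zero vector so that four zero bytes are
// shifted in at the low end:
//   %r = shufflevector <16 x i8> zeroinitializer, <16 x i8> %a,
//                      <16 x i32> <i32 12, i32 13, i32 14, i32 15,
//                                  i32 16, i32 17, ..., i32 27>
// The result is then bitcast back to the original vXi64 type.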
14561 case X86::BI__builtin_ia32_psrldqi128_byteshift:
14562 case X86::BI__builtin_ia32_psrldqi256_byteshift:
14563 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
14564 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14565 auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
14566 // Builtin type is vXi64 so multiply by 8 to get bytes.
14567 unsigned NumElts = ResultType->getNumElements() * 8;
14569 // If psrldq is shifting the vector more than 15 bytes, emit zero.
14570 if (ShiftVal >= 16)
14571 return llvm::Constant::getNullValue(ResultType);
14573 int Indices[64];
14574 // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane separately.
14575 for (unsigned l = 0; l != NumElts; l += 16) {
14576 for (unsigned i = 0; i != 16; ++i) {
14577 unsigned Idx = i + ShiftVal;
14578 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
14579 Indices[l + i] = Idx + l;
14583 auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
14584 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
14585 Value *Zero = llvm::Constant::getNullValue(VecTy);
14586 Value *SV = Builder.CreateShuffleVector(
14587 Cast, Zero, ArrayRef(Indices, NumElts), "psrldq");
14588 return Builder.CreateBitCast(SV, ResultType, "cast");
14590 case X86::BI__builtin_ia32_kshiftliqi:
14591 case X86::BI__builtin_ia32_kshiftlihi:
14592 case X86::BI__builtin_ia32_kshiftlisi:
14593 case X86::BI__builtin_ia32_kshiftlidi: {
14594 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14595 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14597 if (ShiftVal >= NumElts)
14598 return llvm::Constant::getNullValue(Ops[0]->getType());
14600 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
14602 int Indices[64];
14603 for (unsigned i = 0; i != NumElts; ++i)
14604 Indices[i] = NumElts + i - ShiftVal;
14606 Value *Zero = llvm::Constant::getNullValue(In->getType());
14607 Value *SV = Builder.CreateShuffleVector(
14608 Zero, In, ArrayRef(Indices, NumElts), "kshiftl");
14609 return Builder.CreateBitCast(SV, Ops[0]->getType());
14611 case X86::BI__builtin_ia32_kshiftriqi:
14612 case X86::BI__builtin_ia32_kshiftrihi:
14613 case X86::BI__builtin_ia32_kshiftrisi:
14614 case X86::BI__builtin_ia32_kshiftridi: {
14615 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
14616 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14618 if (ShiftVal >= NumElts)
14619 return llvm::Constant::getNullValue(Ops[0]->getType());
14621 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
14623 int Indices[64];
14624 for (unsigned i = 0; i != NumElts; ++i)
14625 Indices[i] = i + ShiftVal;
14627 Value *Zero = llvm::Constant::getNullValue(In->getType());
14628 Value *SV = Builder.CreateShuffleVector(
14629 In, Zero, ArrayRef(Indices, NumElts), "kshiftr");
14630 return Builder.CreateBitCast(SV, Ops[0]->getType());
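// Illustrative sketch (assumption): the mask-register shifts are modeled by
// treating the iN mask as an <N x i1> vector and shuffling it against zero.
// For example _kshiftli_mask16(k, 2) becomes roughly
//   %v = bitcast i16 %k to <16 x i1>
//   %s = shufflevector <16 x i1> zeroinitializer, <16 x i1> %v,
//                      <16 x i32> <i32 14, i32 15, i32 16, ..., i32 29>
//   %r = bitcast <16 x i1> %s to i16
// which is just k << 2 on the underlying bits.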
14632 case X86::BI__builtin_ia32_movnti:
14633 case X86::BI__builtin_ia32_movnti64:
14634 case X86::BI__builtin_ia32_movntsd:
14635 case X86::BI__builtin_ia32_movntss: {
14636 llvm::MDNode *Node = llvm::MDNode::get(
14637 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
14639 Value *Ptr = Ops[0];
14640 Value *Src = Ops[1];
14642 // Extract the 0'th element of the source vector.
14643 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
14644 BuiltinID == X86::BI__builtin_ia32_movntss)
14645 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
14647 // Convert the type of the pointer to a pointer to the stored type.
14648 Value *BC = Builder.CreateBitCast(
14649 Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
14651 // Unaligned nontemporal store of the scalar value.
14652 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
14653 SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
14654 SI->setAlignment(llvm::Align(1));
14655 return SI;
14657 // Rotate is a special case of funnel shift: the first two arguments are the same.
14658 case X86::BI__builtin_ia32_vprotb:
14659 case X86::BI__builtin_ia32_vprotw:
14660 case X86::BI__builtin_ia32_vprotd:
14661 case X86::BI__builtin_ia32_vprotq:
14662 case X86::BI__builtin_ia32_vprotbi:
14663 case X86::BI__builtin_ia32_vprotwi:
14664 case X86::BI__builtin_ia32_vprotdi:
14665 case X86::BI__builtin_ia32_vprotqi:
14666 case X86::BI__builtin_ia32_prold128:
14667 case X86::BI__builtin_ia32_prold256:
14668 case X86::BI__builtin_ia32_prold512:
14669 case X86::BI__builtin_ia32_prolq128:
14670 case X86::BI__builtin_ia32_prolq256:
14671 case X86::BI__builtin_ia32_prolq512:
14672 case X86::BI__builtin_ia32_prolvd128:
14673 case X86::BI__builtin_ia32_prolvd256:
14674 case X86::BI__builtin_ia32_prolvd512:
14675 case X86::BI__builtin_ia32_prolvq128:
14676 case X86::BI__builtin_ia32_prolvq256:
14677 case X86::BI__builtin_ia32_prolvq512:
14678 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
14679 case X86::BI__builtin_ia32_prord128:
14680 case X86::BI__builtin_ia32_prord256:
14681 case X86::BI__builtin_ia32_prord512:
14682 case X86::BI__builtin_ia32_prorq128:
14683 case X86::BI__builtin_ia32_prorq256:
14684 case X86::BI__builtin_ia32_prorq512:
14685 case X86::BI__builtin_ia32_prorvd128:
14686 case X86::BI__builtin_ia32_prorvd256:
14687 case X86::BI__builtin_ia32_prorvd512:
14688 case X86::BI__builtin_ia32_prorvq128:
14689 case X86::BI__builtin_ia32_prorvq256:
14690 case X86::BI__builtin_ia32_prorvq512:
14691 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
14692 case X86::BI__builtin_ia32_selectb_128:
14693 case X86::BI__builtin_ia32_selectb_256:
14694 case X86::BI__builtin_ia32_selectb_512:
14695 case X86::BI__builtin_ia32_selectw_128:
14696 case X86::BI__builtin_ia32_selectw_256:
14697 case X86::BI__builtin_ia32_selectw_512:
14698 case X86::BI__builtin_ia32_selectd_128:
14699 case X86::BI__builtin_ia32_selectd_256:
14700 case X86::BI__builtin_ia32_selectd_512:
14701 case X86::BI__builtin_ia32_selectq_128:
14702 case X86::BI__builtin_ia32_selectq_256:
14703 case X86::BI__builtin_ia32_selectq_512:
14704 case X86::BI__builtin_ia32_selectph_128:
14705 case X86::BI__builtin_ia32_selectph_256:
14706 case X86::BI__builtin_ia32_selectph_512:
14707 case X86::BI__builtin_ia32_selectpbf_128:
14708 case X86::BI__builtin_ia32_selectpbf_256:
14709 case X86::BI__builtin_ia32_selectpbf_512:
14710 case X86::BI__builtin_ia32_selectps_128:
14711 case X86::BI__builtin_ia32_selectps_256:
14712 case X86::BI__builtin_ia32_selectps_512:
14713 case X86::BI__builtin_ia32_selectpd_128:
14714 case X86::BI__builtin_ia32_selectpd_256:
14715 case X86::BI__builtin_ia32_selectpd_512:
14716 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
14717 case X86::BI__builtin_ia32_selectsh_128:
14718 case X86::BI__builtin_ia32_selectsbf_128:
14719 case X86::BI__builtin_ia32_selectss_128:
14720 case X86::BI__builtin_ia32_selectsd_128: {
14721 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
14722 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
14723 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
14724 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
14726 case X86::BI__builtin_ia32_cmpb128_mask:
14727 case X86::BI__builtin_ia32_cmpb256_mask:
14728 case X86::BI__builtin_ia32_cmpb512_mask:
14729 case X86::BI__builtin_ia32_cmpw128_mask:
14730 case X86::BI__builtin_ia32_cmpw256_mask:
14731 case X86::BI__builtin_ia32_cmpw512_mask:
14732 case X86::BI__builtin_ia32_cmpd128_mask:
14733 case X86::BI__builtin_ia32_cmpd256_mask:
14734 case X86::BI__builtin_ia32_cmpd512_mask:
14735 case X86::BI__builtin_ia32_cmpq128_mask:
14736 case X86::BI__builtin_ia32_cmpq256_mask:
14737 case X86::BI__builtin_ia32_cmpq512_mask: {
14738 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
14739 return EmitX86MaskedCompare(*this, CC, true, Ops);
14741 case X86::BI__builtin_ia32_ucmpb128_mask:
14742 case X86::BI__builtin_ia32_ucmpb256_mask:
14743 case X86::BI__builtin_ia32_ucmpb512_mask:
14744 case X86::BI__builtin_ia32_ucmpw128_mask:
14745 case X86::BI__builtin_ia32_ucmpw256_mask:
14746 case X86::BI__builtin_ia32_ucmpw512_mask:
14747 case X86::BI__builtin_ia32_ucmpd128_mask:
14748 case X86::BI__builtin_ia32_ucmpd256_mask:
14749 case X86::BI__builtin_ia32_ucmpd512_mask:
14750 case X86::BI__builtin_ia32_ucmpq128_mask:
14751 case X86::BI__builtin_ia32_ucmpq256_mask:
14752 case X86::BI__builtin_ia32_ucmpq512_mask: {
14753 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
14754 return EmitX86MaskedCompare(*this, CC, false, Ops);
14756 case X86::BI__builtin_ia32_vpcomb:
14757 case X86::BI__builtin_ia32_vpcomw:
14758 case X86::BI__builtin_ia32_vpcomd:
14759 case X86::BI__builtin_ia32_vpcomq:
14760 return EmitX86vpcom(*this, Ops, true);
14761 case X86::BI__builtin_ia32_vpcomub:
14762 case X86::BI__builtin_ia32_vpcomuw:
14763 case X86::BI__builtin_ia32_vpcomud:
14764 case X86::BI__builtin_ia32_vpcomuq:
14765 return EmitX86vpcom(*this, Ops, false);
14767 case X86::BI__builtin_ia32_kortestcqi:
14768 case X86::BI__builtin_ia32_kortestchi:
14769 case X86::BI__builtin_ia32_kortestcsi:
14770 case X86::BI__builtin_ia32_kortestcdi: {
14771 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
14772 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
14773 Value *Cmp = Builder.CreateICmpEQ(Or, C);
14774 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
14776 case X86::BI__builtin_ia32_kortestzqi:
14777 case X86::BI__builtin_ia32_kortestzhi:
14778 case X86::BI__builtin_ia32_kortestzsi:
14779 case X86::BI__builtin_ia32_kortestzdi: {
14780 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
14781 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
14782 Value *Cmp = Builder.CreateICmpEQ(Or, C);
14783 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
14786 case X86::BI__builtin_ia32_ktestcqi:
14787 case X86::BI__builtin_ia32_ktestzqi:
14788 case X86::BI__builtin_ia32_ktestchi:
14789 case X86::BI__builtin_ia32_ktestzhi:
14790 case X86::BI__builtin_ia32_ktestcsi:
14791 case X86::BI__builtin_ia32_ktestzsi:
14792 case X86::BI__builtin_ia32_ktestcdi:
14793 case X86::BI__builtin_ia32_ktestzdi: {
14794 Intrinsic::ID IID;
14795 switch (BuiltinID) {
14796 default: llvm_unreachable("Unsupported intrinsic!");
14797 case X86::BI__builtin_ia32_ktestcqi:
14798 IID = Intrinsic::x86_avx512_ktestc_b;
14799 break;
14800 case X86::BI__builtin_ia32_ktestzqi:
14801 IID = Intrinsic::x86_avx512_ktestz_b;
14802 break;
14803 case X86::BI__builtin_ia32_ktestchi:
14804 IID = Intrinsic::x86_avx512_ktestc_w;
14805 break;
14806 case X86::BI__builtin_ia32_ktestzhi:
14807 IID = Intrinsic::x86_avx512_ktestz_w;
14808 break;
14809 case X86::BI__builtin_ia32_ktestcsi:
14810 IID = Intrinsic::x86_avx512_ktestc_d;
14811 break;
14812 case X86::BI__builtin_ia32_ktestzsi:
14813 IID = Intrinsic::x86_avx512_ktestz_d;
14814 break;
14815 case X86::BI__builtin_ia32_ktestcdi:
14816 IID = Intrinsic::x86_avx512_ktestc_q;
14817 break;
14818 case X86::BI__builtin_ia32_ktestzdi:
14819 IID = Intrinsic::x86_avx512_ktestz_q;
14820 break;
14823 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14824 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14825 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14826 Function *Intr = CGM.getIntrinsic(IID);
14827 return Builder.CreateCall(Intr, {LHS, RHS});
14830 case X86::BI__builtin_ia32_kaddqi:
14831 case X86::BI__builtin_ia32_kaddhi:
14832 case X86::BI__builtin_ia32_kaddsi:
14833 case X86::BI__builtin_ia32_kadddi: {
14834 Intrinsic::ID IID;
14835 switch (BuiltinID) {
14836 default: llvm_unreachable("Unsupported intrinsic!");
14837 case X86::BI__builtin_ia32_kaddqi:
14838 IID = Intrinsic::x86_avx512_kadd_b;
14839 break;
14840 case X86::BI__builtin_ia32_kaddhi:
14841 IID = Intrinsic::x86_avx512_kadd_w;
14842 break;
14843 case X86::BI__builtin_ia32_kaddsi:
14844 IID = Intrinsic::x86_avx512_kadd_d;
14845 break;
14846 case X86::BI__builtin_ia32_kadddi:
14847 IID = Intrinsic::x86_avx512_kadd_q;
14848 break;
14851 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14852 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14853 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14854 Function *Intr = CGM.getIntrinsic(IID);
14855 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
14856 return Builder.CreateBitCast(Res, Ops[0]->getType());
14858 case X86::BI__builtin_ia32_kandqi:
14859 case X86::BI__builtin_ia32_kandhi:
14860 case X86::BI__builtin_ia32_kandsi:
14861 case X86::BI__builtin_ia32_kanddi:
14862 return EmitX86MaskLogic(*this, Instruction::And, Ops);
14863 case X86::BI__builtin_ia32_kandnqi:
14864 case X86::BI__builtin_ia32_kandnhi:
14865 case X86::BI__builtin_ia32_kandnsi:
14866 case X86::BI__builtin_ia32_kandndi:
14867 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
14868 case X86::BI__builtin_ia32_korqi:
14869 case X86::BI__builtin_ia32_korhi:
14870 case X86::BI__builtin_ia32_korsi:
14871 case X86::BI__builtin_ia32_kordi:
14872 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
14873 case X86::BI__builtin_ia32_kxnorqi:
14874 case X86::BI__builtin_ia32_kxnorhi:
14875 case X86::BI__builtin_ia32_kxnorsi:
14876 case X86::BI__builtin_ia32_kxnordi:
14877 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
14878 case X86::BI__builtin_ia32_kxorqi:
14879 case X86::BI__builtin_ia32_kxorhi:
14880 case X86::BI__builtin_ia32_kxorsi:
14881 case X86::BI__builtin_ia32_kxordi:
14882 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
14883 case X86::BI__builtin_ia32_knotqi:
14884 case X86::BI__builtin_ia32_knothi:
14885 case X86::BI__builtin_ia32_knotsi:
14886 case X86::BI__builtin_ia32_knotdi: {
14887 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14888 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14889 return Builder.CreateBitCast(Builder.CreateNot(Res),
14890 Ops[0]->getType());
14892 case X86::BI__builtin_ia32_kmovb:
14893 case X86::BI__builtin_ia32_kmovw:
14894 case X86::BI__builtin_ia32_kmovd:
14895 case X86::BI__builtin_ia32_kmovq: {
14896 // Bitcast to vXi1 type and then back to integer. This gets the mask
14897 // register type into the IR, but might be optimized out depending on
14898 // what's around it.
14899 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14900 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14901 return Builder.CreateBitCast(Res, Ops[0]->getType());
14904 case X86::BI__builtin_ia32_kunpckdi:
14905 case X86::BI__builtin_ia32_kunpcksi:
14906 case X86::BI__builtin_ia32_kunpckhi: {
14907 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14908 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14909 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14910 int Indices[64];
14911 for (unsigned i = 0; i != NumElts; ++i)
14912 Indices[i] = i;
14914 // First extract half of each vector. This gives better codegen than
14915 // doing it in a single shuffle.
14916 LHS = Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
14917 RHS = Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
14918 // Concat the vectors.
14919 // NOTE: Operands are swapped to match the intrinsic definition.
14920 Value *Res =
14921 Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
14922 return Builder.CreateBitCast(Res, Ops[0]->getType());
14925 case X86::BI__builtin_ia32_vplzcntd_128:
14926 case X86::BI__builtin_ia32_vplzcntd_256:
14927 case X86::BI__builtin_ia32_vplzcntd_512:
14928 case X86::BI__builtin_ia32_vplzcntq_128:
14929 case X86::BI__builtin_ia32_vplzcntq_256:
14930 case X86::BI__builtin_ia32_vplzcntq_512: {
14931 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
14932 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
14934 case X86::BI__builtin_ia32_sqrtss:
14935 case X86::BI__builtin_ia32_sqrtsd: {
14936 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
14937 Function *F;
14938 if (Builder.getIsFPConstrained()) {
14939 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14940 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14941 A->getType());
14942 A = Builder.CreateConstrainedFPCall(F, {A});
14943 } else {
14944 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14945 A = Builder.CreateCall(F, {A});
14947 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14949 case X86::BI__builtin_ia32_sqrtsh_round_mask:
14950 case X86::BI__builtin_ia32_sqrtsd_round_mask:
14951 case X86::BI__builtin_ia32_sqrtss_round_mask: {
14952 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
14953 // Only lower to a generic sqrt if the rounding mode is 4 (AKA CUR_DIRECTION);
14954 // otherwise keep the target-specific intrinsic.
14955 if (CC != 4) {
14956 Intrinsic::ID IID;
14958 switch (BuiltinID) {
14959 default:
14960 llvm_unreachable("Unsupported intrinsic!");
14961 case X86::BI__builtin_ia32_sqrtsh_round_mask:
14962 IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
14963 break;
14964 case X86::BI__builtin_ia32_sqrtsd_round_mask:
14965 IID = Intrinsic::x86_avx512_mask_sqrt_sd;
14966 break;
14967 case X86::BI__builtin_ia32_sqrtss_round_mask:
14968 IID = Intrinsic::x86_avx512_mask_sqrt_ss;
14969 break;
14971 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14973 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
14974 Function *F;
14975 if (Builder.getIsFPConstrained()) {
14976 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14977 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14978 A->getType());
14979 A = Builder.CreateConstrainedFPCall(F, A);
14980 } else {
14981 F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14982 A = Builder.CreateCall(F, A);
14984 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
14985 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
14986 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14988 case X86::BI__builtin_ia32_sqrtpd256:
14989 case X86::BI__builtin_ia32_sqrtpd:
14990 case X86::BI__builtin_ia32_sqrtps256:
14991 case X86::BI__builtin_ia32_sqrtps:
14992 case X86::BI__builtin_ia32_sqrtph256:
14993 case X86::BI__builtin_ia32_sqrtph:
14994 case X86::BI__builtin_ia32_sqrtph512:
14995 case X86::BI__builtin_ia32_sqrtps512:
14996 case X86::BI__builtin_ia32_sqrtpd512: {
14997 if (Ops.size() == 2) {
14998 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
14999 // Only lower to a generic sqrt if the rounding mode is 4 (AKA CUR_DIRECTION);
15000 // otherwise keep the target-specific intrinsic.
15001 if (CC != 4) {
15002 Intrinsic::ID IID;
15004 switch (BuiltinID) {
15005 default:
15006 llvm_unreachable("Unsupported intrinsic!");
15007 case X86::BI__builtin_ia32_sqrtph512:
15008 IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
15009 break;
15010 case X86::BI__builtin_ia32_sqrtps512:
15011 IID = Intrinsic::x86_avx512_sqrt_ps_512;
15012 break;
15013 case X86::BI__builtin_ia32_sqrtpd512:
15014 IID = Intrinsic::x86_avx512_sqrt_pd_512;
15015 break;
15017 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15020 if (Builder.getIsFPConstrained()) {
15021 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15022 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
15023 Ops[0]->getType());
15024 return Builder.CreateConstrainedFPCall(F, Ops[0]);
15025 } else {
15026 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
15027 return Builder.CreateCall(F, Ops[0]);
15031 case X86::BI__builtin_ia32_pmuludq128:
15032 case X86::BI__builtin_ia32_pmuludq256:
15033 case X86::BI__builtin_ia32_pmuludq512:
15034 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
15036 case X86::BI__builtin_ia32_pmuldq128:
15037 case X86::BI__builtin_ia32_pmuldq256:
15038 case X86::BI__builtin_ia32_pmuldq512:
15039 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
15041 case X86::BI__builtin_ia32_pternlogd512_mask:
15042 case X86::BI__builtin_ia32_pternlogq512_mask:
15043 case X86::BI__builtin_ia32_pternlogd128_mask:
15044 case X86::BI__builtin_ia32_pternlogd256_mask:
15045 case X86::BI__builtin_ia32_pternlogq128_mask:
15046 case X86::BI__builtin_ia32_pternlogq256_mask:
15047 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
15049 case X86::BI__builtin_ia32_pternlogd512_maskz:
15050 case X86::BI__builtin_ia32_pternlogq512_maskz:
15051 case X86::BI__builtin_ia32_pternlogd128_maskz:
15052 case X86::BI__builtin_ia32_pternlogd256_maskz:
15053 case X86::BI__builtin_ia32_pternlogq128_maskz:
15054 case X86::BI__builtin_ia32_pternlogq256_maskz:
15055 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
15057 case X86::BI__builtin_ia32_vpshldd128:
15058 case X86::BI__builtin_ia32_vpshldd256:
15059 case X86::BI__builtin_ia32_vpshldd512:
15060 case X86::BI__builtin_ia32_vpshldq128:
15061 case X86::BI__builtin_ia32_vpshldq256:
15062 case X86::BI__builtin_ia32_vpshldq512:
15063 case X86::BI__builtin_ia32_vpshldw128:
15064 case X86::BI__builtin_ia32_vpshldw256:
15065 case X86::BI__builtin_ia32_vpshldw512:
15066 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
15068 case X86::BI__builtin_ia32_vpshrdd128:
15069 case X86::BI__builtin_ia32_vpshrdd256:
15070 case X86::BI__builtin_ia32_vpshrdd512:
15071 case X86::BI__builtin_ia32_vpshrdq128:
15072 case X86::BI__builtin_ia32_vpshrdq256:
15073 case X86::BI__builtin_ia32_vpshrdq512:
15074 case X86::BI__builtin_ia32_vpshrdw128:
15075 case X86::BI__builtin_ia32_vpshrdw256:
15076 case X86::BI__builtin_ia32_vpshrdw512:
15077 // Ops 0 and 1 are swapped.
15078 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
15080 case X86::BI__builtin_ia32_vpshldvd128:
15081 case X86::BI__builtin_ia32_vpshldvd256:
15082 case X86::BI__builtin_ia32_vpshldvd512:
15083 case X86::BI__builtin_ia32_vpshldvq128:
15084 case X86::BI__builtin_ia32_vpshldvq256:
15085 case X86::BI__builtin_ia32_vpshldvq512:
15086 case X86::BI__builtin_ia32_vpshldvw128:
15087 case X86::BI__builtin_ia32_vpshldvw256:
15088 case X86::BI__builtin_ia32_vpshldvw512:
15089 return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
15091 case X86::BI__builtin_ia32_vpshrdvd128:
15092 case X86::BI__builtin_ia32_vpshrdvd256:
15093 case X86::BI__builtin_ia32_vpshrdvd512:
15094 case X86::BI__builtin_ia32_vpshrdvq128:
15095 case X86::BI__builtin_ia32_vpshrdvq256:
15096 case X86::BI__builtin_ia32_vpshrdvq512:
15097 case X86::BI__builtin_ia32_vpshrdvw128:
15098 case X86::BI__builtin_ia32_vpshrdvw256:
15099 case X86::BI__builtin_ia32_vpshrdvw512:
15100 // Ops 0 and 1 are swapped.
15101 return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
15103 // Reductions
15104 case X86::BI__builtin_ia32_reduce_fadd_pd512:
15105 case X86::BI__builtin_ia32_reduce_fadd_ps512:
15106 case X86::BI__builtin_ia32_reduce_fadd_ph512:
15107 case X86::BI__builtin_ia32_reduce_fadd_ph256:
15108 case X86::BI__builtin_ia32_reduce_fadd_ph128: {
15109 Function *F =
15110 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
15111 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15112 Builder.getFastMathFlags().setAllowReassoc();
15113 return Builder.CreateCall(F, {Ops[0], Ops[1]});
15115 case X86::BI__builtin_ia32_reduce_fmul_pd512:
15116 case X86::BI__builtin_ia32_reduce_fmul_ps512:
15117 case X86::BI__builtin_ia32_reduce_fmul_ph512:
15118 case X86::BI__builtin_ia32_reduce_fmul_ph256:
15119 case X86::BI__builtin_ia32_reduce_fmul_ph128: {
15120 Function *F =
15121 CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
15122 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15123 Builder.getFastMathFlags().setAllowReassoc();
15124 return Builder.CreateCall(F, {Ops[0], Ops[1]});
15126 case X86::BI__builtin_ia32_reduce_fmax_pd512:
15127 case X86::BI__builtin_ia32_reduce_fmax_ps512:
15128 case X86::BI__builtin_ia32_reduce_fmax_ph512:
15129 case X86::BI__builtin_ia32_reduce_fmax_ph256:
15130 case X86::BI__builtin_ia32_reduce_fmax_ph128: {
15131 Function *F =
15132 CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
15133 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15134 Builder.getFastMathFlags().setNoNaNs();
15135 return Builder.CreateCall(F, {Ops[0]});
15137 case X86::BI__builtin_ia32_reduce_fmin_pd512:
15138 case X86::BI__builtin_ia32_reduce_fmin_ps512:
15139 case X86::BI__builtin_ia32_reduce_fmin_ph512:
15140 case X86::BI__builtin_ia32_reduce_fmin_ph256:
15141 case X86::BI__builtin_ia32_reduce_fmin_ph128: {
15142 Function *F =
15143 CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
15144 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
15145 Builder.getFastMathFlags().setNoNaNs();
15146 return Builder.CreateCall(F, {Ops[0]});
15149 // 3DNow!
15150 case X86::BI__builtin_ia32_pswapdsf:
15151 case X86::BI__builtin_ia32_pswapdsi: {
15152 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
15153 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
15154 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
15155 return Builder.CreateCall(F, Ops, "pswapd");
15157 case X86::BI__builtin_ia32_rdrand16_step:
15158 case X86::BI__builtin_ia32_rdrand32_step:
15159 case X86::BI__builtin_ia32_rdrand64_step:
15160 case X86::BI__builtin_ia32_rdseed16_step:
15161 case X86::BI__builtin_ia32_rdseed32_step:
15162 case X86::BI__builtin_ia32_rdseed64_step: {
15163 Intrinsic::ID ID;
15164 switch (BuiltinID) {
15165 default: llvm_unreachable("Unsupported intrinsic!");
15166 case X86::BI__builtin_ia32_rdrand16_step:
15167 ID = Intrinsic::x86_rdrand_16;
15168 break;
15169 case X86::BI__builtin_ia32_rdrand32_step:
15170 ID = Intrinsic::x86_rdrand_32;
15171 break;
15172 case X86::BI__builtin_ia32_rdrand64_step:
15173 ID = Intrinsic::x86_rdrand_64;
15174 break;
15175 case X86::BI__builtin_ia32_rdseed16_step:
15176 ID = Intrinsic::x86_rdseed_16;
15177 break;
15178 case X86::BI__builtin_ia32_rdseed32_step:
15179 ID = Intrinsic::x86_rdseed_32;
15180 break;
15181 case X86::BI__builtin_ia32_rdseed64_step:
15182 ID = Intrinsic::x86_rdseed_64;
15183 break;
15186 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
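// The rdrand/rdseed intrinsics return {random value, success flag}; store the
// value through the out pointer and return the flag.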
15187 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
15188 Ops[0]);
15189 return Builder.CreateExtractValue(Call, 1);
15191 case X86::BI__builtin_ia32_addcarryx_u32:
15192 case X86::BI__builtin_ia32_addcarryx_u64:
15193 case X86::BI__builtin_ia32_subborrow_u32:
15194 case X86::BI__builtin_ia32_subborrow_u64: {
15195 Intrinsic::ID IID;
15196 switch (BuiltinID) {
15197 default: llvm_unreachable("Unsupported intrinsic!");
15198 case X86::BI__builtin_ia32_addcarryx_u32:
15199 IID = Intrinsic::x86_addcarry_32;
15200 break;
15201 case X86::BI__builtin_ia32_addcarryx_u64:
15202 IID = Intrinsic::x86_addcarry_64;
15203 break;
15204 case X86::BI__builtin_ia32_subborrow_u32:
15205 IID = Intrinsic::x86_subborrow_32;
15206 break;
15207 case X86::BI__builtin_ia32_subborrow_u64:
15208 IID = Intrinsic::x86_subborrow_64;
15209 break;
15212 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
15213 { Ops[0], Ops[1], Ops[2] });
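// The intrinsic returns {carry-out, result}; store the result through Ops[3]
// and return the carry-out.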
15214 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
15215 Ops[3]);
15216 return Builder.CreateExtractValue(Call, 0);
15219 case X86::BI__builtin_ia32_fpclassps128_mask:
15220 case X86::BI__builtin_ia32_fpclassps256_mask:
15221 case X86::BI__builtin_ia32_fpclassps512_mask:
15222 case X86::BI__builtin_ia32_fpclassph128_mask:
15223 case X86::BI__builtin_ia32_fpclassph256_mask:
15224 case X86::BI__builtin_ia32_fpclassph512_mask:
15225 case X86::BI__builtin_ia32_fpclasspd128_mask:
15226 case X86::BI__builtin_ia32_fpclasspd256_mask:
15227 case X86::BI__builtin_ia32_fpclasspd512_mask: {
15228 unsigned NumElts =
15229 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15230 Value *MaskIn = Ops[2];
15231 Ops.erase(&Ops[2]);
15233 Intrinsic::ID ID;
15234 switch (BuiltinID) {
15235 default: llvm_unreachable("Unsupported intrinsic!");
15236 case X86::BI__builtin_ia32_fpclassph128_mask:
15237 ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
15238 break;
15239 case X86::BI__builtin_ia32_fpclassph256_mask:
15240 ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
15241 break;
15242 case X86::BI__builtin_ia32_fpclassph512_mask:
15243 ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
15244 break;
15245 case X86::BI__builtin_ia32_fpclassps128_mask:
15246 ID = Intrinsic::x86_avx512_fpclass_ps_128;
15247 break;
15248 case X86::BI__builtin_ia32_fpclassps256_mask:
15249 ID = Intrinsic::x86_avx512_fpclass_ps_256;
15250 break;
15251 case X86::BI__builtin_ia32_fpclassps512_mask:
15252 ID = Intrinsic::x86_avx512_fpclass_ps_512;
15253 break;
15254 case X86::BI__builtin_ia32_fpclasspd128_mask:
15255 ID = Intrinsic::x86_avx512_fpclass_pd_128;
15256 break;
15257 case X86::BI__builtin_ia32_fpclasspd256_mask:
15258 ID = Intrinsic::x86_avx512_fpclass_pd_256;
15259 break;
15260 case X86::BI__builtin_ia32_fpclasspd512_mask:
15261 ID = Intrinsic::x86_avx512_fpclass_pd_512;
15262 break;
15265 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15266 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
15269 case X86::BI__builtin_ia32_vp2intersect_q_512:
15270 case X86::BI__builtin_ia32_vp2intersect_q_256:
15271 case X86::BI__builtin_ia32_vp2intersect_q_128:
15272 case X86::BI__builtin_ia32_vp2intersect_d_512:
15273 case X86::BI__builtin_ia32_vp2intersect_d_256:
15274 case X86::BI__builtin_ia32_vp2intersect_d_128: {
15275 unsigned NumElts =
15276 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15277 Intrinsic::ID ID;
15279 switch (BuiltinID) {
15280 default: llvm_unreachable("Unsupported intrinsic!");
15281 case X86::BI__builtin_ia32_vp2intersect_q_512:
15282 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
15283 break;
15284 case X86::BI__builtin_ia32_vp2intersect_q_256:
15285 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
15286 break;
15287 case X86::BI__builtin_ia32_vp2intersect_q_128:
15288 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
15289 break;
15290 case X86::BI__builtin_ia32_vp2intersect_d_512:
15291 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
15292 break;
15293 case X86::BI__builtin_ia32_vp2intersect_d_256:
15294 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
15295 break;
15296 case X86::BI__builtin_ia32_vp2intersect_d_128:
15297 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
15298 break;
15301 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
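// vp2intersect returns a pair of vXi1 masks; convert each to the integer mask
// type and store them through the two output pointers (Ops[2] and Ops[3]).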
15302 Value *Result = Builder.CreateExtractValue(Call, 0);
15303 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
15304 Builder.CreateDefaultAlignedStore(Result, Ops[2]);
15306 Result = Builder.CreateExtractValue(Call, 1);
15307 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
15308 return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
15311 case X86::BI__builtin_ia32_vpmultishiftqb128:
15312 case X86::BI__builtin_ia32_vpmultishiftqb256:
15313 case X86::BI__builtin_ia32_vpmultishiftqb512: {
15314 Intrinsic::ID ID;
15315 switch (BuiltinID) {
15316 default: llvm_unreachable("Unsupported intrinsic!");
15317 case X86::BI__builtin_ia32_vpmultishiftqb128:
15318 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
15319 break;
15320 case X86::BI__builtin_ia32_vpmultishiftqb256:
15321 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
15322 break;
15323 case X86::BI__builtin_ia32_vpmultishiftqb512:
15324 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
15325 break;
15328 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15331 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
15332 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
15333 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
15334 unsigned NumElts =
15335 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15336 Value *MaskIn = Ops[2];
15337 Ops.erase(&Ops[2]);
15339 Intrinsic::ID ID;
15340 switch (BuiltinID) {
15341 default: llvm_unreachable("Unsupported intrinsic!");
15342 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
15343 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
15344 break;
15345 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
15346 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
15347 break;
15348 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
15349 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
15350 break;
15353 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
15354 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
15357 // packed comparison intrinsics
15358 case X86::BI__builtin_ia32_cmpeqps:
15359 case X86::BI__builtin_ia32_cmpeqpd:
15360 return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
15361 case X86::BI__builtin_ia32_cmpltps:
15362 case X86::BI__builtin_ia32_cmpltpd:
15363 return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
15364 case X86::BI__builtin_ia32_cmpleps:
15365 case X86::BI__builtin_ia32_cmplepd:
15366 return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
15367 case X86::BI__builtin_ia32_cmpunordps:
15368 case X86::BI__builtin_ia32_cmpunordpd:
15369 return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
15370 case X86::BI__builtin_ia32_cmpneqps:
15371 case X86::BI__builtin_ia32_cmpneqpd:
15372 return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
15373 case X86::BI__builtin_ia32_cmpnltps:
15374 case X86::BI__builtin_ia32_cmpnltpd:
15375 return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
15376 case X86::BI__builtin_ia32_cmpnleps:
15377 case X86::BI__builtin_ia32_cmpnlepd:
15378 return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
15379 case X86::BI__builtin_ia32_cmpordps:
15380 case X86::BI__builtin_ia32_cmpordpd:
15381 return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
15382 case X86::BI__builtin_ia32_cmpph128_mask:
15383 case X86::BI__builtin_ia32_cmpph256_mask:
15384 case X86::BI__builtin_ia32_cmpph512_mask:
15385 case X86::BI__builtin_ia32_cmpps128_mask:
15386 case X86::BI__builtin_ia32_cmpps256_mask:
15387 case X86::BI__builtin_ia32_cmpps512_mask:
15388 case X86::BI__builtin_ia32_cmppd128_mask:
15389 case X86::BI__builtin_ia32_cmppd256_mask:
15390 case X86::BI__builtin_ia32_cmppd512_mask:
15391 IsMaskFCmp = true;
15392 [[fallthrough]];
15393 case X86::BI__builtin_ia32_cmpps:
15394 case X86::BI__builtin_ia32_cmpps256:
15395 case X86::BI__builtin_ia32_cmppd:
15396 case X86::BI__builtin_ia32_cmppd256: {
15397 // Lower vector comparisons to fcmp instructions, while
15398 // ignoring the requested signalling behaviour and
15399 // ignoring the requested rounding mode.
15400 // This is only possible if the fp-model is not strict and FENV_ACCESS is off.
15402 // The third argument is the comparison condition, an integer in the
15403 // range [0, 31].
15404 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
15406 // Lower to an IR fcmp instruction,
15407 // ignoring the requested signaling behaviour:
15408 // e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to FCMP_OGT.
15409 FCmpInst::Predicate Pred;
15410 bool IsSignaling;
15411 // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling
15412 // behavior is inverted. We'll handle that after the switch.
15413 switch (CC & 0xf) {
15414 case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break;
15415 case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break;
15416 case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break;
15417 case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break;
15418 case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break;
15419 case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break;
15420 case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break;
15421 case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break;
15422 case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break;
15423 case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break;
15424 case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break;
15425 case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
15426 case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break;
15427 case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break;
15428 case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break;
15429 case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break;
15430 default: llvm_unreachable("Unhandled CC");
15433 // Invert the signalling behavior for 16-31.
15434 if (CC & 0x10)
15435 IsSignaling = !IsSignaling;
15437 // If the predicate is true or false and we're using constrained intrinsics,
15438 // we don't have a compare intrinsic we can use. Just use the legacy X86
15439 // specific intrinsic.
15440 // If the intrinsic is mask enabled and we're using constrained intrinsics,
15441 // use the legacy X86 specific intrinsic.
15442 if (Builder.getIsFPConstrained() &&
15443 (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
15444 IsMaskFCmp)) {
15446 Intrinsic::ID IID;
15447 switch (BuiltinID) {
15448 default: llvm_unreachable("Unexpected builtin");
15449 case X86::BI__builtin_ia32_cmpps:
15450 IID = Intrinsic::x86_sse_cmp_ps;
15451 break;
15452 case X86::BI__builtin_ia32_cmpps256:
15453 IID = Intrinsic::x86_avx_cmp_ps_256;
15454 break;
15455 case X86::BI__builtin_ia32_cmppd:
15456 IID = Intrinsic::x86_sse2_cmp_pd;
15457 break;
15458 case X86::BI__builtin_ia32_cmppd256:
15459 IID = Intrinsic::x86_avx_cmp_pd_256;
15460 break;
15461 case X86::BI__builtin_ia32_cmpps512_mask:
15462 IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
15463 break;
15464 case X86::BI__builtin_ia32_cmppd512_mask:
15465 IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
15466 break;
15467 case X86::BI__builtin_ia32_cmpps128_mask:
15468 IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
15469 break;
15470 case X86::BI__builtin_ia32_cmpps256_mask:
15471 IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
15472 break;
15473 case X86::BI__builtin_ia32_cmppd128_mask:
15474 IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
15475 break;
15476 case X86::BI__builtin_ia32_cmppd256_mask:
15477 IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
15478 break;
15481 Function *Intr = CGM.getIntrinsic(IID);
15482 if (IsMaskFCmp) {
15483 unsigned NumElts =
15484 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15485 Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
15486 Value *Cmp = Builder.CreateCall(Intr, Ops);
15487 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
15490 return Builder.CreateCall(Intr, Ops);
15493 // Builtins without the _mask suffix return a vector of integers
15494 // of the same width as the input vectors
15495 if (IsMaskFCmp) {
15496 // We ignore SAE if strict FP is disabled. We only keep precise
15497 // exception behavior under strict FP.
15498 // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
15499 // object will be required.
15500 unsigned NumElts =
15501 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
15502 Value *Cmp;
15503 if (IsSignaling)
15504 Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
15505 else
15506 Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
15507 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
15510 return getVectorFCmpIR(Pred, IsSignaling);
15513 // SSE scalar comparison intrinsics
15514 case X86::BI__builtin_ia32_cmpeqss:
15515 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
15516 case X86::BI__builtin_ia32_cmpltss:
15517 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
15518 case X86::BI__builtin_ia32_cmpless:
15519 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
15520 case X86::BI__builtin_ia32_cmpunordss:
15521 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
15522 case X86::BI__builtin_ia32_cmpneqss:
15523 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
15524 case X86::BI__builtin_ia32_cmpnltss:
15525 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
15526 case X86::BI__builtin_ia32_cmpnless:
15527 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
15528 case X86::BI__builtin_ia32_cmpordss:
15529 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
15530 case X86::BI__builtin_ia32_cmpeqsd:
15531 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
15532 case X86::BI__builtin_ia32_cmpltsd:
15533 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
15534 case X86::BI__builtin_ia32_cmplesd:
15535 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
15536 case X86::BI__builtin_ia32_cmpunordsd:
15537 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
15538 case X86::BI__builtin_ia32_cmpneqsd:
15539 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
15540 case X86::BI__builtin_ia32_cmpnltsd:
15541 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
15542 case X86::BI__builtin_ia32_cmpnlesd:
15543 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
15544 case X86::BI__builtin_ia32_cmpordsd:
15545 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
15547 // f16c half2float intrinsics
15548 case X86::BI__builtin_ia32_vcvtph2ps:
15549 case X86::BI__builtin_ia32_vcvtph2ps256:
15550 case X86::BI__builtin_ia32_vcvtph2ps_mask:
15551 case X86::BI__builtin_ia32_vcvtph2ps256_mask:
15552 case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
15553 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
15554 return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
15557 // AVX512 bf16 intrinsics
15558 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
15559 Ops[2] = getMaskVecValue(
15560 *this, Ops[2],
15561 cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
15562 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
15563 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15565 case X86::BI__builtin_ia32_cvtsbf162ss_32:
15566 return Builder.CreateFPExt(Ops[0], Builder.getFloatTy());
15568 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
15569 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
15570 Intrinsic::ID IID;
15571 switch (BuiltinID) {
15572 default: llvm_unreachable("Unsupported intrinsic!");
15573 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
15574 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
15575 break;
15576 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
15577 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
15578 break;
15580 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
15581 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
15584 case X86::BI__cpuid:
15585 case X86::BI__cpuidex: {
15586 Value *FuncId = EmitScalarExpr(E->getArg(1));
15587 Value *SubFuncId = BuiltinID == X86::BI__cpuidex
15588 ? EmitScalarExpr(E->getArg(2))
15589 : llvm::ConstantInt::get(Int32Ty, 0);
15591 llvm::StructType *CpuidRetTy =
15592 llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, Int32Ty);
15593 llvm::FunctionType *FTy =
15594 llvm::FunctionType::get(CpuidRetTy, {Int32Ty, Int32Ty}, false);
15596 StringRef Asm, Constraints;
15597 if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
15598 Asm = "cpuid";
15599 Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}";
15600 } else {
15601 // x86-64 uses %rbx as the base register, so preserve it.
15602 Asm = "xchgq %rbx, ${1:q}\n"
15603 "cpuid\n"
15604 "xchgq %rbx, ${1:q}";
15605 Constraints = "={ax},=r,={cx},={dx},0,2";
15608 llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, Asm, Constraints,
15609 /*hasSideEffects=*/false);
15610 Value *IACall = Builder.CreateCall(IA, {FuncId, SubFuncId});
15611 Value *BasePtr = EmitScalarExpr(E->getArg(0));
15612 Value *Store = nullptr;
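// Scatter the four cpuid outputs (eax, ebx, ecx, edx) into the int[4] buffer.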
15613 for (unsigned i = 0; i < 4; i++) {
15614 Value *Extracted = Builder.CreateExtractValue(IACall, i);
15615 Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Int32Ty, BasePtr, i);
15616 Store = Builder.CreateAlignedStore(Extracted, StorePtr, getIntAlign());
15619 // Return the last store instruction to signal that we have emitted the
15620 // intrinsic.
15621 return Store;
15624 case X86::BI__emul:
15625 case X86::BI__emulu: {
15626 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
15627 bool isSigned = (BuiltinID == X86::BI__emul);
15628 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
15629 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
15630 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
15632 case X86::BI__mulh:
15633 case X86::BI__umulh:
15634 case X86::BI_mul128:
15635 case X86::BI_umul128: {
15636 llvm::Type *ResType = ConvertType(E->getType());
15637 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
15639 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
15640 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
15641 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
15643 Value *MulResult, *HigherBits;
15644 if (IsSigned) {
15645 MulResult = Builder.CreateNSWMul(LHS, RHS);
15646 HigherBits = Builder.CreateAShr(MulResult, 64);
15647 } else {
15648 MulResult = Builder.CreateNUWMul(LHS, RHS);
15649 HigherBits = Builder.CreateLShr(MulResult, 64);
15651 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
15653 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
15654 return HigherBits;
15656 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
15657 Builder.CreateStore(HigherBits, HighBitsAddress);
15658 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
15661 case X86::BI__faststorefence: {
15662 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
15663 llvm::SyncScope::System);
15665 case X86::BI__shiftleft128:
15666 case X86::BI__shiftright128: {
15667 llvm::Function *F = CGM.getIntrinsic(
15668 BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
15669 Int64Ty);
15670 // Flip low/high ops and zero-extend amount to matching type.
15671 // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
15672 // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
15673 std::swap(Ops[0], Ops[1]);
15674 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
15675 return Builder.CreateCall(F, Ops);
15677 case X86::BI_ReadWriteBarrier:
15678 case X86::BI_ReadBarrier:
15679 case X86::BI_WriteBarrier: {
15680 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
15681 llvm::SyncScope::SingleThread);
15684 case X86::BI_AddressOfReturnAddress: {
15685 Function *F =
15686 CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
15687 return Builder.CreateCall(F);
15689 case X86::BI__stosb: {
15690 // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
15691 // instruction, but it will create a memset that won't be optimized away.
15692 return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
15694 case X86::BI__ud2:
15695 // llvm.trap lowers to a ud2a instruction on x86.
15696 return EmitTrapCall(Intrinsic::trap);
15697 case X86::BI__int2c: {
15698 // This syscall signals a driver assertion failure in x86 NT kernels.
15699 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
15700 llvm::InlineAsm *IA =
15701 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
15702 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
15703 getLLVMContext(), llvm::AttributeList::FunctionIndex,
15704 llvm::Attribute::NoReturn);
15705 llvm::CallInst *CI = Builder.CreateCall(IA);
15706 CI->setAttributes(NoReturnAttr);
15707 return CI;
15709 case X86::BI__readfsbyte:
15710 case X86::BI__readfsword:
15711 case X86::BI__readfsdword:
15712 case X86::BI__readfsqword: {
15713 llvm::Type *IntTy = ConvertType(E->getType());
15714 Value *Ptr =
15715 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
15716 LoadInst *Load = Builder.CreateAlignedLoad(
15717 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
15718 Load->setVolatile(true);
15719 return Load;
15721 case X86::BI__readgsbyte:
15722 case X86::BI__readgsword:
15723 case X86::BI__readgsdword:
15724 case X86::BI__readgsqword: {
15725 llvm::Type *IntTy = ConvertType(E->getType());
15726 Value *Ptr =
15727 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
15728 LoadInst *Load = Builder.CreateAlignedLoad(
15729 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
15730 Load->setVolatile(true);
15731 return Load;
15733 case X86::BI__builtin_ia32_encodekey128_u32: {
15734 Intrinsic::ID IID = Intrinsic::x86_encodekey128;
15736 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
15738 for (int i = 0; i < 3; ++i) {
15739 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15740 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
15741 Ptr = Builder.CreateBitCast(
15742 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
15743 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
15746 return Builder.CreateExtractValue(Call, 0);
15748 case X86::BI__builtin_ia32_encodekey256_u32: {
15749 Intrinsic::ID IID = Intrinsic::x86_encodekey256;
15751 Value *Call =
15752 Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
15754 for (int i = 0; i < 4; ++i) {
15755 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15756 Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
15757 Ptr = Builder.CreateBitCast(
15758 Ptr, llvm::PointerType::getUnqual(Extract->getType()));
15759 Builder.CreateAlignedStore(Extract, Ptr, Align(1));
15762 return Builder.CreateExtractValue(Call, 0);
15764 case X86::BI__builtin_ia32_aesenc128kl_u8:
15765 case X86::BI__builtin_ia32_aesdec128kl_u8:
15766 case X86::BI__builtin_ia32_aesenc256kl_u8:
15767 case X86::BI__builtin_ia32_aesdec256kl_u8: {
15768 Intrinsic::ID IID;
15769 StringRef BlockName;
15770 switch (BuiltinID) {
15771 default:
15772 llvm_unreachable("Unexpected builtin");
15773 case X86::BI__builtin_ia32_aesenc128kl_u8:
15774 IID = Intrinsic::x86_aesenc128kl;
15775 BlockName = "aesenc128kl";
15776 break;
15777 case X86::BI__builtin_ia32_aesdec128kl_u8:
15778 IID = Intrinsic::x86_aesdec128kl;
15779 BlockName = "aesdec128kl";
15780 break;
15781 case X86::BI__builtin_ia32_aesenc256kl_u8:
15782 IID = Intrinsic::x86_aesenc256kl;
15783 BlockName = "aesenc256kl";
15784 break;
15785 case X86::BI__builtin_ia32_aesdec256kl_u8:
15786 IID = Intrinsic::x86_aesdec256kl;
15787 BlockName = "aesdec256kl";
15788 break;
15791 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
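// The intrinsic returns {success flag, data}; branch on the low bit of the flag
// and store either the result block or zero, then return the flag.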
15793 BasicBlock *NoError =
15794 createBasicBlock(BlockName + "_no_error", this->CurFn);
15795 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15796 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15798 Value *Ret = Builder.CreateExtractValue(Call, 0);
15799 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15800 Value *Out = Builder.CreateExtractValue(Call, 1);
15801 Builder.CreateCondBr(Succ, NoError, Error);
15803 Builder.SetInsertPoint(NoError);
15804 Builder.CreateDefaultAlignedStore(Out, Ops[0]);
15805 Builder.CreateBr(End);
15807 Builder.SetInsertPoint(Error);
15808 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15809 Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
15810 Builder.CreateBr(End);
15812 Builder.SetInsertPoint(End);
15813 return Builder.CreateExtractValue(Call, 0);
15815 case X86::BI__builtin_ia32_aesencwide128kl_u8:
15816 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15817 case X86::BI__builtin_ia32_aesencwide256kl_u8:
15818 case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
15819 Intrinsic::ID IID;
15820 StringRef BlockName;
15821 switch (BuiltinID) {
15822 case X86::BI__builtin_ia32_aesencwide128kl_u8:
15823 IID = Intrinsic::x86_aesencwide128kl;
15824 BlockName = "aesencwide128kl";
15825 break;
15826 case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15827 IID = Intrinsic::x86_aesdecwide128kl;
15828 BlockName = "aesdecwide128kl";
15829 break;
15830 case X86::BI__builtin_ia32_aesencwide256kl_u8:
15831 IID = Intrinsic::x86_aesencwide256kl;
15832 BlockName = "aesencwide256kl";
15833 break;
15834 case X86::BI__builtin_ia32_aesdecwide256kl_u8:
15835 IID = Intrinsic::x86_aesdecwide256kl;
15836 BlockName = "aesdecwide256kl";
15837 break;
15840 llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
15841 Value *InOps[9];
15842 InOps[0] = Ops[2];
15843 for (int i = 0; i != 8; ++i) {
15844 Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
15845 InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
15848 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
15850 BasicBlock *NoError =
15851 createBasicBlock(BlockName + "_no_error", this->CurFn);
15852 BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15853 BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15855 Value *Ret = Builder.CreateExtractValue(Call, 0);
15856 Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15857 Builder.CreateCondBr(Succ, NoError, Error);
15859 Builder.SetInsertPoint(NoError);
15860 for (int i = 0; i != 8; ++i) {
15861 Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15862 Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
15863 Builder.CreateAlignedStore(Extract, Ptr, Align(16));
15865 Builder.CreateBr(End);
15867 Builder.SetInsertPoint(Error);
15868 for (int i = 0; i != 8; ++i) {
15869 Value *Out = Builder.CreateExtractValue(Call, i + 1);
15870 Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15871 Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
15872 Builder.CreateAlignedStore(Zero, Ptr, Align(16));
15874 Builder.CreateBr(End);
15876 Builder.SetInsertPoint(End);
15877 return Builder.CreateExtractValue(Call, 0);
15879 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
15880 IsConjFMA = true;
15881 [[fallthrough]];
15882 case X86::BI__builtin_ia32_vfmaddcph512_mask: {
15883 Intrinsic::ID IID = IsConjFMA
15884 ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
15885 : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
15886 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15887 return EmitX86Select(*this, Ops[3], Call, Ops[0]);
15889 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
15890 IsConjFMA = true;
15891 [[fallthrough]];
15892 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
15893 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15894 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15895 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15896 Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
15897 return EmitX86Select(*this, And, Call, Ops[0]);
15899 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
15900 IsConjFMA = true;
15901 [[fallthrough]];
15902 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
15903 Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15904 : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15905 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15906 static constexpr int Mask[] = {0, 5, 6, 7};
15907 return Builder.CreateShuffleVector(Call, Ops[2], Mask);
15909 case X86::BI__builtin_ia32_prefetchi:
15910 return Builder.CreateCall(
15911 CGM.getIntrinsic(Intrinsic::prefetch, Ops[0]->getType()),
15912 {Ops[0], llvm::ConstantInt::get(Int32Ty, 0), Ops[1],
15913 llvm::ConstantInt::get(Int32Ty, 0)});
15917 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
15918 const CallExpr *E) {
15919 // Do not emit the builtin arguments directly as arguments of a function call,
15920 // because the evaluation order of function arguments is not specified in C++.
15921 // This is important when testing to ensure the arguments are emitted in the
15922 // same order every time. Eg:
15923 // Instead of:
15924 // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)),
15925 // EmitScalarExpr(E->getArg(1)), "swdiv");
15926 // Use:
15927 // Value *Op0 = EmitScalarExpr(E->getArg(0));
15928 // Value *Op1 = EmitScalarExpr(E->getArg(1));
15929 // return Builder.CreateFDiv(Op0, Op1, "swdiv")
15931 Intrinsic::ID ID = Intrinsic::not_intrinsic;
15933 switch (BuiltinID) {
15934 default: return nullptr;
15936 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
15937 // call __builtin_readcyclecounter.
15938 case PPC::BI__builtin_ppc_get_timebase:
15939 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
15941 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
15942 case PPC::BI__builtin_altivec_lvx:
15943 case PPC::BI__builtin_altivec_lvxl:
15944 case PPC::BI__builtin_altivec_lvebx:
15945 case PPC::BI__builtin_altivec_lvehx:
15946 case PPC::BI__builtin_altivec_lvewx:
15947 case PPC::BI__builtin_altivec_lvsl:
15948 case PPC::BI__builtin_altivec_lvsr:
15949 case PPC::BI__builtin_vsx_lxvd2x:
15950 case PPC::BI__builtin_vsx_lxvw4x:
15951 case PPC::BI__builtin_vsx_lxvd2x_be:
15952 case PPC::BI__builtin_vsx_lxvw4x_be:
15953 case PPC::BI__builtin_vsx_lxvl:
15954 case PPC::BI__builtin_vsx_lxvll:
15956 SmallVector<Value *, 2> Ops;
15957 Ops.push_back(EmitScalarExpr(E->getArg(0)));
15958 Ops.push_back(EmitScalarExpr(E->getArg(1)));
15959 if (!(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
15960 BuiltinID == PPC::BI__builtin_vsx_lxvll)) {
15961 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
15962 Ops.pop_back();
15965 switch (BuiltinID) {
15966 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
15967 case PPC::BI__builtin_altivec_lvx:
15968 ID = Intrinsic::ppc_altivec_lvx;
15969 break;
15970 case PPC::BI__builtin_altivec_lvxl:
15971 ID = Intrinsic::ppc_altivec_lvxl;
15972 break;
15973 case PPC::BI__builtin_altivec_lvebx:
15974 ID = Intrinsic::ppc_altivec_lvebx;
15975 break;
15976 case PPC::BI__builtin_altivec_lvehx:
15977 ID = Intrinsic::ppc_altivec_lvehx;
15978 break;
15979 case PPC::BI__builtin_altivec_lvewx:
15980 ID = Intrinsic::ppc_altivec_lvewx;
15981 break;
15982 case PPC::BI__builtin_altivec_lvsl:
15983 ID = Intrinsic::ppc_altivec_lvsl;
15984 break;
15985 case PPC::BI__builtin_altivec_lvsr:
15986 ID = Intrinsic::ppc_altivec_lvsr;
15987 break;
15988 case PPC::BI__builtin_vsx_lxvd2x:
15989 ID = Intrinsic::ppc_vsx_lxvd2x;
15990 break;
15991 case PPC::BI__builtin_vsx_lxvw4x:
15992 ID = Intrinsic::ppc_vsx_lxvw4x;
15993 break;
15994 case PPC::BI__builtin_vsx_lxvd2x_be:
15995 ID = Intrinsic::ppc_vsx_lxvd2x_be;
15996 break;
15997 case PPC::BI__builtin_vsx_lxvw4x_be:
15998 ID = Intrinsic::ppc_vsx_lxvw4x_be;
15999 break;
16000 case PPC::BI__builtin_vsx_lxvl:
16001 ID = Intrinsic::ppc_vsx_lxvl;
16002 break;
16003 case PPC::BI__builtin_vsx_lxvll:
16004 ID = Intrinsic::ppc_vsx_lxvll;
16005 break;
16007 llvm::Function *F = CGM.getIntrinsic(ID);
16008 return Builder.CreateCall(F, Ops, "");
16011 // vec_st, vec_xst_be
16012 case PPC::BI__builtin_altivec_stvx:
16013 case PPC::BI__builtin_altivec_stvxl:
16014 case PPC::BI__builtin_altivec_stvebx:
16015 case PPC::BI__builtin_altivec_stvehx:
16016 case PPC::BI__builtin_altivec_stvewx:
16017 case PPC::BI__builtin_vsx_stxvd2x:
16018 case PPC::BI__builtin_vsx_stxvw4x:
16019 case PPC::BI__builtin_vsx_stxvd2x_be:
16020 case PPC::BI__builtin_vsx_stxvw4x_be:
16021 case PPC::BI__builtin_vsx_stxvl:
16022 case PPC::BI__builtin_vsx_stxvll:
16024 SmallVector<Value *, 3> Ops;
16025 Ops.push_back(EmitScalarExpr(E->getArg(0)));
16026 Ops.push_back(EmitScalarExpr(E->getArg(1)));
16027 Ops.push_back(EmitScalarExpr(E->getArg(2)));
16028 if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
16029 BuiltinID == PPC::BI__builtin_vsx_stxvll)) {
16030 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
16031 Ops.pop_back();
16034 switch (BuiltinID) {
16035 default: llvm_unreachable("Unsupported st intrinsic!");
16036 case PPC::BI__builtin_altivec_stvx:
16037 ID = Intrinsic::ppc_altivec_stvx;
16038 break;
16039 case PPC::BI__builtin_altivec_stvxl:
16040 ID = Intrinsic::ppc_altivec_stvxl;
16041 break;
16042 case PPC::BI__builtin_altivec_stvebx:
16043 ID = Intrinsic::ppc_altivec_stvebx;
16044 break;
16045 case PPC::BI__builtin_altivec_stvehx:
16046 ID = Intrinsic::ppc_altivec_stvehx;
16047 break;
16048 case PPC::BI__builtin_altivec_stvewx:
16049 ID = Intrinsic::ppc_altivec_stvewx;
16050 break;
16051 case PPC::BI__builtin_vsx_stxvd2x:
16052 ID = Intrinsic::ppc_vsx_stxvd2x;
16053 break;
16054 case PPC::BI__builtin_vsx_stxvw4x:
16055 ID = Intrinsic::ppc_vsx_stxvw4x;
16056 break;
16057 case PPC::BI__builtin_vsx_stxvd2x_be:
16058 ID = Intrinsic::ppc_vsx_stxvd2x_be;
16059 break;
16060 case PPC::BI__builtin_vsx_stxvw4x_be:
16061 ID = Intrinsic::ppc_vsx_stxvw4x_be;
16062 break;
16063 case PPC::BI__builtin_vsx_stxvl:
16064 ID = Intrinsic::ppc_vsx_stxvl;
16065 break;
16066 case PPC::BI__builtin_vsx_stxvll:
16067 ID = Intrinsic::ppc_vsx_stxvll;
16068 break;
16070 llvm::Function *F = CGM.getIntrinsic(ID);
16071 return Builder.CreateCall(F, Ops, "");
16073 case PPC::BI__builtin_vsx_ldrmb: {
16074 // Essentially boils down to performing an unaligned VMX load sequence so
16075 // as to avoid crossing a page boundary and then shuffling the elements
16076 // into the right side of the vector register.
16077 Value *Op0 = EmitScalarExpr(E->getArg(0));
16078 Value *Op1 = EmitScalarExpr(E->getArg(1));
16079 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
16080 llvm::Type *ResTy = ConvertType(E->getType());
16081 bool IsLE = getTarget().isLittleEndian();
16083 // If the user wants the entire vector, just load the entire vector.
16084 if (NumBytes == 16) {
16085 Value *LD =
16086 Builder.CreateLoad(Address(Op0, ResTy, CharUnits::fromQuantity(1)));
16087 if (!IsLE)
16088 return LD;
16090 // Reverse the bytes on LE.
16091 SmallVector<int, 16> RevMask;
16092 for (int Idx = 0; Idx < 16; Idx++)
16093 RevMask.push_back(15 - Idx);
16094 return Builder.CreateShuffleVector(LD, LD, RevMask);
16097 llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
16098 llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
16099 : Intrinsic::ppc_altivec_lvsl);
16100 llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
16101 Value *HiMem = Builder.CreateGEP(
16102 Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1));
16103 Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo");
16104 Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
16105 Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1");
16107 Op0 = IsLE ? HiLd : LoLd;
16108 Op1 = IsLE ? LoLd : HiLd;
16109 Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
16110 Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
16112 if (IsLE) {
16113 SmallVector<int, 16> Consts;
16114 for (int Idx = 0; Idx < 16; Idx++) {
16115 int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
16116 : 16 - (NumBytes - Idx);
16117 Consts.push_back(Val);
16119 return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
16120 Zero, Consts);
16122 SmallVector<Constant *, 16> Consts;
16123 for (int Idx = 0; Idx < 16; Idx++)
16124 Consts.push_back(Builder.getInt8(NumBytes + Idx));
16125 Value *Mask2 = ConstantVector::get(Consts);
16126 return Builder.CreateBitCast(
16127 Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
16129 case PPC::BI__builtin_vsx_strmb: {
16130 Value *Op0 = EmitScalarExpr(E->getArg(0));
16131 Value *Op1 = EmitScalarExpr(E->getArg(1));
16132 Value *Op2 = EmitScalarExpr(E->getArg(2));
16133 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
16134 bool IsLE = getTarget().isLittleEndian();
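// The store is decomposed into power-of-two sub-stores via StoreSubVec
// below. As an illustrative example, NumBytes == 7 on LE becomes a 4-byte
// store at offset 3 (word element 0), a 2-byte store at offset 1 (halfword
// element 2), and a 1-byte store at offset 0 (byte element 6), with bswap
// applied to the multi-byte pieces.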
16135 auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
16136 // If we are storing the whole vector, store it directly on BE; on LE,
16137 // reverse the bytes first and then store.
16138 if (Width == 16) {
16139 Value *StVec = Op2;
16140 if (IsLE) {
16141 SmallVector<int, 16> RevMask;
16142 for (int Idx = 0; Idx < 16; Idx++)
16143 RevMask.push_back(15 - Idx);
16144 StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
16146 return Builder.CreateStore(
16147 StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1)));
16149 auto *ConvTy = Int64Ty;
16150 unsigned NumElts = 0;
16151 switch (Width) {
16152 default:
16153 llvm_unreachable("width for stores must be a power of 2");
16154 case 8:
16155 ConvTy = Int64Ty;
16156 NumElts = 2;
16157 break;
16158 case 4:
16159 ConvTy = Int32Ty;
16160 NumElts = 4;
16161 break;
16162 case 2:
16163 ConvTy = Int16Ty;
16164 NumElts = 8;
16165 break;
16166 case 1:
16167 ConvTy = Int8Ty;
16168 NumElts = 16;
16169 break;
16171 Value *Vec = Builder.CreateBitCast(
16172 Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
16173 Value *Ptr =
16174 Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
16175 Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
16176 if (IsLE && Width > 1) {
16177 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
16178 Elt = Builder.CreateCall(F, Elt);
16180 return Builder.CreateStore(
16181 Elt, Address(Ptr, ConvTy, CharUnits::fromQuantity(1)));
16183 unsigned Stored = 0;
16184 unsigned RemainingBytes = NumBytes;
16185 Value *Result;
16186 if (NumBytes == 16)
16187 return StoreSubVec(16, 0, 0);
16188 if (NumBytes >= 8) {
16189 Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
16190 RemainingBytes -= 8;
16191 Stored += 8;
16193 if (RemainingBytes >= 4) {
16194 Result = StoreSubVec(4, NumBytes - Stored - 4,
16195 IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
16196 RemainingBytes -= 4;
16197 Stored += 4;
16199 if (RemainingBytes >= 2) {
16200 Result = StoreSubVec(2, NumBytes - Stored - 2,
16201 IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
16202 RemainingBytes -= 2;
16203 Stored += 2;
16205 if (RemainingBytes)
16206 Result =
16207 StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
16208 return Result;
16210 // Square root
16211 case PPC::BI__builtin_vsx_xvsqrtsp:
16212 case PPC::BI__builtin_vsx_xvsqrtdp: {
16213 llvm::Type *ResultType = ConvertType(E->getType());
16214 Value *X = EmitScalarExpr(E->getArg(0));
16215 if (Builder.getIsFPConstrained()) {
16216 llvm::Function *F = CGM.getIntrinsic(
16217 Intrinsic::experimental_constrained_sqrt, ResultType);
16218 return Builder.CreateConstrainedFPCall(F, X);
16219 } else {
16220 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16221 return Builder.CreateCall(F, X);
16224 // Count leading zeros
16225 case PPC::BI__builtin_altivec_vclzb:
16226 case PPC::BI__builtin_altivec_vclzh:
16227 case PPC::BI__builtin_altivec_vclzw:
16228 case PPC::BI__builtin_altivec_vclzd: {
16229 llvm::Type *ResultType = ConvertType(E->getType());
16230 Value *X = EmitScalarExpr(E->getArg(0));
16231 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16232 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
16233 return Builder.CreateCall(F, {X, Undef});
16235 case PPC::BI__builtin_altivec_vctzb:
16236 case PPC::BI__builtin_altivec_vctzh:
16237 case PPC::BI__builtin_altivec_vctzw:
16238 case PPC::BI__builtin_altivec_vctzd: {
16239 llvm::Type *ResultType = ConvertType(E->getType());
16240 Value *X = EmitScalarExpr(E->getArg(0));
16241 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16242 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
16243 return Builder.CreateCall(F, {X, Undef});
16245 case PPC::BI__builtin_altivec_vinsd:
16246 case PPC::BI__builtin_altivec_vinsw:
16247 case PPC::BI__builtin_altivec_vinsd_elt:
16248 case PPC::BI__builtin_altivec_vinsw_elt: {
16249 llvm::Type *ResultType = ConvertType(E->getType());
16250 Value *Op0 = EmitScalarExpr(E->getArg(0));
16251 Value *Op1 = EmitScalarExpr(E->getArg(1));
16252 Value *Op2 = EmitScalarExpr(E->getArg(2));
16254 bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
16255 BuiltinID == PPC::BI__builtin_altivec_vinsd);
16257 bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
16258 BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);
16260 // The third argument must be a compile time constant.
16261 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16262 assert(ArgCI &&
16263 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
16265 // The valid range for the third argument depends on the input type and
16266 // the builtin being called.
16267 int ValidMaxValue = 0;
16268 if (IsUnaligned)
16269 ValidMaxValue = (Is32bit) ? 12 : 8;
16270 else
16271 ValidMaxValue = (Is32bit) ? 3 : 1;
16273 // Get value of third argument.
16274 int64_t ConstArg = ArgCI->getSExtValue();
16276 // Compose range checking error message.
16277 std::string RangeErrMsg = IsUnaligned ? "byte" : "element";
16278 RangeErrMsg += " number " + llvm::to_string(ConstArg);
16279 RangeErrMsg += " is outside of the valid range [0, ";
16280 RangeErrMsg += llvm::to_string(ValidMaxValue) + "]";
16282 // Issue error if third argument is not within the valid range.
16283 if (ConstArg < 0 || ConstArg > ValidMaxValue)
16284 CGM.Error(E->getExprLoc(), RangeErrMsg);
16287 // Input to vec_replace_elt is an element index; convert it to a byte index.
16287 if (!IsUnaligned) {
16288 ConstArg *= Is32bit ? 4 : 8;
16290 // Fix the constant according to endianness.
16290 if (getTarget().isLittleEndian())
16291 ConstArg = (Is32bit ? 12 : 8) - ConstArg;
16294 ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
16295 Op2 = ConstantInt::getSigned(Int32Ty, ConstArg);
16296 // Casting input to vector int as per intrinsic definition.
16297 Op0 =
16298 Is32bit
16299 ? Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4))
16300 : Builder.CreateBitCast(Op0,
16301 llvm::FixedVectorType::get(Int64Ty, 2));
16302 return Builder.CreateBitCast(
16303 Builder.CreateCall(CGM.getIntrinsic(ID), {Op0, Op1, Op2}), ResultType);
16305 case PPC::BI__builtin_altivec_vpopcntb:
16306 case PPC::BI__builtin_altivec_vpopcnth:
16307 case PPC::BI__builtin_altivec_vpopcntw:
16308 case PPC::BI__builtin_altivec_vpopcntd: {
16309 llvm::Type *ResultType = ConvertType(E->getType());
16310 Value *X = EmitScalarExpr(E->getArg(0));
16311 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
16312 return Builder.CreateCall(F, X);
16314 case PPC::BI__builtin_altivec_vadduqm:
16315 case PPC::BI__builtin_altivec_vsubuqm: {
16316 Value *Op0 = EmitScalarExpr(E->getArg(0));
16317 Value *Op1 = EmitScalarExpr(E->getArg(1));
16318 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
16319 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
16320 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
16321 if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
16322 return Builder.CreateAdd(Op0, Op1, "vadduqm");
16323 else
16324 return Builder.CreateSub(Op0, Op1, "vsubuqm");
16326 case PPC::BI__builtin_altivec_vaddcuq_c:
16327 case PPC::BI__builtin_altivec_vsubcuq_c: {
16328 SmallVector<Value *, 2> Ops;
16329 Value *Op0 = EmitScalarExpr(E->getArg(0));
16330 Value *Op1 = EmitScalarExpr(E->getArg(1));
16331 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
16332 llvm::IntegerType::get(getLLVMContext(), 128), 1);
16333 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
16334 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
16335 ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
16336 ? Intrinsic::ppc_altivec_vaddcuq
16337 : Intrinsic::ppc_altivec_vsubcuq;
16338 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
16340 case PPC::BI__builtin_altivec_vaddeuqm_c:
16341 case PPC::BI__builtin_altivec_vaddecuq_c:
16342 case PPC::BI__builtin_altivec_vsubeuqm_c:
16343 case PPC::BI__builtin_altivec_vsubecuq_c: {
16344 SmallVector<Value *, 3> Ops;
16345 Value *Op0 = EmitScalarExpr(E->getArg(0));
16346 Value *Op1 = EmitScalarExpr(E->getArg(1));
16347 Value *Op2 = EmitScalarExpr(E->getArg(2));
16348 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
16349 llvm::IntegerType::get(getLLVMContext(), 128), 1);
16350 Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
16351 Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
16352 Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
16353 switch (BuiltinID) {
16354 default:
16355 llvm_unreachable("Unsupported intrinsic!");
16356 case PPC::BI__builtin_altivec_vaddeuqm_c:
16357 ID = Intrinsic::ppc_altivec_vaddeuqm;
16358 break;
16359 case PPC::BI__builtin_altivec_vaddecuq_c:
16360 ID = Intrinsic::ppc_altivec_vaddecuq;
16361 break;
16362 case PPC::BI__builtin_altivec_vsubeuqm_c:
16363 ID = Intrinsic::ppc_altivec_vsubeuqm;
16364 break;
16365 case PPC::BI__builtin_altivec_vsubecuq_c:
16366 ID = Intrinsic::ppc_altivec_vsubecuq;
16367 break;
16369 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
16371 // Rotate and insert under mask operation.
16372 // __rldimi(rs, is, shift, mask)
16373 // (rotl64(rs, shift) & mask) | (is & ~mask)
16374 // __rlwimi(rs, is, shift, mask)
16375 // (rotl(rs, shift) & mask) | (is & ~mask)
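// For example (illustrative operands):
//   __rlwimi(rs, is, 8, 0x00FFFF00)
//     == (rotl(rs, 8) & 0x00FFFF00) | (is & 0xFF0000FF)
// which is emitted below as llvm.fshl.i32(rs, rs, 8) followed by the two
// ands and an or; for __rldimi the shift amount is zero-extended to i64.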
16376 case PPC::BI__builtin_ppc_rldimi:
16377 case PPC::BI__builtin_ppc_rlwimi: {
16378 Value *Op0 = EmitScalarExpr(E->getArg(0));
16379 Value *Op1 = EmitScalarExpr(E->getArg(1));
16380 Value *Op2 = EmitScalarExpr(E->getArg(2));
16381 Value *Op3 = EmitScalarExpr(E->getArg(3));
16382 llvm::Type *Ty = Op0->getType();
16383 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16384 if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
16385 Op2 = Builder.CreateZExt(Op2, Int64Ty);
16386 Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
16387 Value *X = Builder.CreateAnd(Shift, Op3);
16388 Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
16389 return Builder.CreateOr(X, Y);
16391 // Rotate and insert under mask operation.
16392 // __rlwnm(rs, shift, mask)
16393 // rotl(rs, shift) & mask
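// For example (illustrative operands): __rlwnm(rs, 8, 0xFF)
//   == rotl(rs, 8) & 0xFF
// emitted as llvm.fshl.i32(rs, rs, 8) followed by an and with the mask.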
16394 case PPC::BI__builtin_ppc_rlwnm: {
16395 Value *Op0 = EmitScalarExpr(E->getArg(0));
16396 Value *Op1 = EmitScalarExpr(E->getArg(1));
16397 Value *Op2 = EmitScalarExpr(E->getArg(2));
16398 llvm::Type *Ty = Op0->getType();
16399 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16400 Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
16401 return Builder.CreateAnd(Shift, Op2);
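// Population parity: __builtin_ppc_poppar4/8(x) is lowered as
// llvm.ctpop(x) & 1 (illustratively, poppar4(0b1011) == 1), then cast to
// the result type if needed.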
16403 case PPC::BI__builtin_ppc_poppar4:
16404 case PPC::BI__builtin_ppc_poppar8: {
16405 Value *Op0 = EmitScalarExpr(E->getArg(0));
16406 llvm::Type *ArgType = Op0->getType();
16407 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
16408 Value *Tmp = Builder.CreateCall(F, Op0);
16410 llvm::Type *ResultType = ConvertType(E->getType());
16411 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
16412 if (Result->getType() != ResultType)
16413 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
16414 "cast");
16415 return Result;
16417 case PPC::BI__builtin_ppc_cmpb: {
16418 Value *Op0 = EmitScalarExpr(E->getArg(0));
16419 Value *Op1 = EmitScalarExpr(E->getArg(1));
16420 if (getTarget().getTriple().isPPC64()) {
16421 Function *F =
16422 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
16423 return Builder.CreateCall(F, {Op0, Op1}, "cmpb");
16425 // For 32-bit targets, emit the code below:
16426 // %conv = trunc i64 %a to i32
16427 // %conv1 = trunc i64 %b to i32
16428 // %shr = lshr i64 %a, 32
16429 // %conv2 = trunc i64 %shr to i32
16430 // %shr3 = lshr i64 %b, 32
16431 // %conv4 = trunc i64 %shr3 to i32
16432 // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
16433 // %conv5 = zext i32 %0 to i64
16434 // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
16435 // %conv614 = zext i32 %1 to i64
16436 // %shl = shl nuw i64 %conv614, 32
16437 // %or = or i64 %shl, %conv5
16438 // ret i64 %or
16439 Function *F =
16440 CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
16441 Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty);
16442 Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty);
16443 Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
16444 Value *ArgOneHi =
16445 Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty);
16446 Value *ArgTwoHi =
16447 Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty);
16448 Value *ResLo = Builder.CreateZExt(
16449 Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
16450 Value *ResHiShift = Builder.CreateZExt(
16451 Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
16452 Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
16453 return Builder.CreateOr(ResLo, ResHi);
16455 // Copy sign
16456 case PPC::BI__builtin_vsx_xvcpsgnsp:
16457 case PPC::BI__builtin_vsx_xvcpsgndp: {
16458 llvm::Type *ResultType = ConvertType(E->getType());
16459 Value *X = EmitScalarExpr(E->getArg(0));
16460 Value *Y = EmitScalarExpr(E->getArg(1));
16461 ID = Intrinsic::copysign;
16462 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
16463 return Builder.CreateCall(F, {X, Y});
16465 // Rounding/truncation
16466 case PPC::BI__builtin_vsx_xvrspip:
16467 case PPC::BI__builtin_vsx_xvrdpip:
16468 case PPC::BI__builtin_vsx_xvrdpim:
16469 case PPC::BI__builtin_vsx_xvrspim:
16470 case PPC::BI__builtin_vsx_xvrdpi:
16471 case PPC::BI__builtin_vsx_xvrspi:
16472 case PPC::BI__builtin_vsx_xvrdpic:
16473 case PPC::BI__builtin_vsx_xvrspic:
16474 case PPC::BI__builtin_vsx_xvrdpiz:
16475 case PPC::BI__builtin_vsx_xvrspiz: {
16476 llvm::Type *ResultType = ConvertType(E->getType());
16477 Value *X = EmitScalarExpr(E->getArg(0));
16478 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
16479 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
16480 ID = Builder.getIsFPConstrained()
16481 ? Intrinsic::experimental_constrained_floor
16482 : Intrinsic::floor;
16483 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
16484 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
16485 ID = Builder.getIsFPConstrained()
16486 ? Intrinsic::experimental_constrained_round
16487 : Intrinsic::round;
16488 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
16489 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
16490 ID = Builder.getIsFPConstrained()
16491 ? Intrinsic::experimental_constrained_rint
16492 : Intrinsic::rint;
16493 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
16494 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
16495 ID = Builder.getIsFPConstrained()
16496 ? Intrinsic::experimental_constrained_ceil
16497 : Intrinsic::ceil;
16498 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
16499 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
16500 ID = Builder.getIsFPConstrained()
16501 ? Intrinsic::experimental_constrained_trunc
16502 : Intrinsic::trunc;
16503 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
16504 return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
16505 : Builder.CreateCall(F, X);
16508 // Absolute value
16509 case PPC::BI__builtin_vsx_xvabsdp:
16510 case PPC::BI__builtin_vsx_xvabssp: {
16511 llvm::Type *ResultType = ConvertType(E->getType());
16512 Value *X = EmitScalarExpr(E->getArg(0));
16513 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16514 return Builder.CreateCall(F, X);
16517 // Fastmath by default
16518 case PPC::BI__builtin_ppc_recipdivf:
16519 case PPC::BI__builtin_ppc_recipdivd:
16520 case PPC::BI__builtin_ppc_rsqrtf:
16521 case PPC::BI__builtin_ppc_rsqrtd: {
16522 FastMathFlags FMF = Builder.getFastMathFlags();
16523 Builder.getFastMathFlags().setFast();
16524 llvm::Type *ResultType = ConvertType(E->getType());
16525 Value *X = EmitScalarExpr(E->getArg(0));
16527 if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
16528 BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
16529 Value *Y = EmitScalarExpr(E->getArg(1));
16530 Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
16531 Builder.getFastMathFlags() &= (FMF);
16532 return FDiv;
16534 auto *One = ConstantFP::get(ResultType, 1.0);
16535 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16536 Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
16537 Builder.getFastMathFlags() &= (FMF);
16538 return FDiv;
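// __builtin_ppc_alignx(align, ptr) below returns ptr unchanged and emits an
// alignment assumption for it; e.g. (illustratively)
// __builtin_ppc_alignx(16, p) lets later passes assume p is 16-byte
// aligned. Alignments above llvm::Value::MaximumAlignment are clamped.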
16540 case PPC::BI__builtin_ppc_alignx: {
16541 Value *Op0 = EmitScalarExpr(E->getArg(0));
16542 Value *Op1 = EmitScalarExpr(E->getArg(1));
16543 ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
16544 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
16545 AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
16546 llvm::Value::MaximumAlignment);
16548 emitAlignmentAssumption(Op1, E->getArg(1),
16549 /*The expr loc is sufficient.*/ SourceLocation(),
16550 AlignmentCI, nullptr);
16551 return Op1;
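// __builtin_ppc_rdlam(rs, shift, mask) is rotl64(rs, shift) & mask; e.g.
// (illustratively) rdlam(x, 8, 0xFF00) rotates x left by 8 and keeps bits
// 8..15. It is emitted via llvm.fshl.i64 plus an and.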
16553 case PPC::BI__builtin_ppc_rdlam: {
16554 Value *Op0 = EmitScalarExpr(E->getArg(0));
16555 Value *Op1 = EmitScalarExpr(E->getArg(1));
16556 Value *Op2 = EmitScalarExpr(E->getArg(2));
16557 llvm::Type *Ty = Op0->getType();
16558 Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false);
16559 Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
16560 Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
16561 return Builder.CreateAnd(Rotate, Op2);
16563 case PPC::BI__builtin_ppc_load2r: {
16564 Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
16565 Value *Op0 = EmitScalarExpr(E->getArg(0));
16566 Value *LoadIntrinsic = Builder.CreateCall(F, {Op0});
16567 return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
16569 // FMA variations
16570 case PPC::BI__builtin_ppc_fnmsub:
16571 case PPC::BI__builtin_ppc_fnmsubs:
16572 case PPC::BI__builtin_vsx_xvmaddadp:
16573 case PPC::BI__builtin_vsx_xvmaddasp:
16574 case PPC::BI__builtin_vsx_xvnmaddadp:
16575 case PPC::BI__builtin_vsx_xvnmaddasp:
16576 case PPC::BI__builtin_vsx_xvmsubadp:
16577 case PPC::BI__builtin_vsx_xvmsubasp:
16578 case PPC::BI__builtin_vsx_xvnmsubadp:
16579 case PPC::BI__builtin_vsx_xvnmsubasp: {
16580 llvm::Type *ResultType = ConvertType(E->getType());
16581 Value *X = EmitScalarExpr(E->getArg(0));
16582 Value *Y = EmitScalarExpr(E->getArg(1));
16583 Value *Z = EmitScalarExpr(E->getArg(2));
16584 llvm::Function *F;
16585 if (Builder.getIsFPConstrained())
16586 F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
16587 else
16588 F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16589 switch (BuiltinID) {
16590 case PPC::BI__builtin_vsx_xvmaddadp:
16591 case PPC::BI__builtin_vsx_xvmaddasp:
16592 if (Builder.getIsFPConstrained())
16593 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
16594 else
16595 return Builder.CreateCall(F, {X, Y, Z});
16596 case PPC::BI__builtin_vsx_xvnmaddadp:
16597 case PPC::BI__builtin_vsx_xvnmaddasp:
16598 if (Builder.getIsFPConstrained())
16599 return Builder.CreateFNeg(
16600 Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
16601 else
16602 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
16603 case PPC::BI__builtin_vsx_xvmsubadp:
16604 case PPC::BI__builtin_vsx_xvmsubasp:
16605 if (Builder.getIsFPConstrained())
16606 return Builder.CreateConstrainedFPCall(
16607 F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16608 else
16609 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16610 case PPC::BI__builtin_ppc_fnmsub:
16611 case PPC::BI__builtin_ppc_fnmsubs:
16612 case PPC::BI__builtin_vsx_xvnmsubadp:
16613 case PPC::BI__builtin_vsx_xvnmsubasp:
16614 if (Builder.getIsFPConstrained())
16615 return Builder.CreateFNeg(
16616 Builder.CreateConstrainedFPCall(
16617 F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
16618 "neg");
16619 else
16620 return Builder.CreateCall(
16621 CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
16623 llvm_unreachable("Unknown FMA operation");
16624 return nullptr; // Suppress no-return warning
16627 case PPC::BI__builtin_vsx_insertword: {
16628 Value *Op0 = EmitScalarExpr(E->getArg(0));
16629 Value *Op1 = EmitScalarExpr(E->getArg(1));
16630 Value *Op2 = EmitScalarExpr(E->getArg(2));
16631 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
16633 // The third argument is a compile time constant int. It must be clamped
16634 // to the range [0, 12].
16635 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16636 assert(ArgCI &&
16637 "Third arg to xxinsertw intrinsic must be constant integer");
16638 const int64_t MaxIndex = 12;
16639 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
16641 // The builtin semantics don't exactly match the xxinsertw instruction's
16642 // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
16643 // word from the first argument and inserts it into the second argument. The
16644 // instruction extracts the word from its second input register and inserts
16645 // it into its first input register, so swap the first and second arguments.
16646 std::swap(Op0, Op1);
16648 // Need to cast the second argument from a vector of unsigned int to a
16649 // vector of long long.
16650 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
16652 if (getTarget().isLittleEndian()) {
16653 // Reverse the double words in the vector we will extract from.
16654 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
16655 Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});
16657 // Reverse the index.
16658 Index = MaxIndex - Index;
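// (For example, a clamped index of 4 becomes 12 - 4 = 8 here before being
// passed to xxinsertw.)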
16661 // Intrinsic expects the first arg to be a vector of int.
16662 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
16663 Op2 = ConstantInt::getSigned(Int32Ty, Index);
16664 return Builder.CreateCall(F, {Op0, Op1, Op2});
16667 case PPC::BI__builtin_vsx_extractuword: {
16668 Value *Op0 = EmitScalarExpr(E->getArg(0));
16669 Value *Op1 = EmitScalarExpr(E->getArg(1));
16670 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
16672 // Intrinsic expects the first argument to be a vector of doublewords.
16673 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
16675 // The second argument is a compile time constant int that needs to
16676 // be clamped to the range [0, 12].
16677 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
16678 assert(ArgCI &&
16679 "Second Arg to xxextractuw intrinsic must be a constant integer!");
16680 const int64_t MaxIndex = 12;
16681 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
16683 if (getTarget().isLittleEndian()) {
16684 // Reverse the index.
16685 Index = MaxIndex - Index;
16686 Op1 = ConstantInt::getSigned(Int32Ty, Index);
16688 // Emit the call, then reverse the double words of the result vector.
16689 Value *Call = Builder.CreateCall(F, {Op0, Op1});
16691 Value *ShuffleCall =
16692 Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
16693 return ShuffleCall;
16694 } else {
16695 Op1 = ConstantInt::getSigned(Int32Ty, Index);
16696 return Builder.CreateCall(F, {Op0, Op1});
16700 case PPC::BI__builtin_vsx_xxpermdi: {
16701 Value *Op0 = EmitScalarExpr(E->getArg(0));
16702 Value *Op1 = EmitScalarExpr(E->getArg(1));
16703 Value *Op2 = EmitScalarExpr(E->getArg(2));
16704 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16705 assert(ArgCI && "Third arg must be constant integer!");
16707 unsigned Index = ArgCI->getZExtValue();
16708 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
16709 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
16711 // Account for endianness by treating this as just a shuffle. So we use the
16712 // same indices for both LE and BE in order to produce expected results in
16713 // both cases.
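// For example (illustrative immediate): Index == 3 selects doubleword 1 of
// the first vector and doubleword 1 of the second, i.e. the shuffle mask
// {1, 3} over the concatenated <4 x i64> input.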
16714 int ElemIdx0 = (Index & 2) >> 1;
16715 int ElemIdx1 = 2 + (Index & 1);
16717 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
16718 Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
16719 QualType BIRetType = E->getType();
16720 auto RetTy = ConvertType(BIRetType);
16721 return Builder.CreateBitCast(ShuffleCall, RetTy);
16724 case PPC::BI__builtin_vsx_xxsldwi: {
16725 Value *Op0 = EmitScalarExpr(E->getArg(0));
16726 Value *Op1 = EmitScalarExpr(E->getArg(1));
16727 Value *Op2 = EmitScalarExpr(E->getArg(2));
16728 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
16729 assert(ArgCI && "Third argument must be a compile time constant");
16730 unsigned Index = ArgCI->getZExtValue() & 0x3;
16731 Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
16732 Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4));
16734 // Create a shuffle mask
16735 int ElemIdx0;
16736 int ElemIdx1;
16737 int ElemIdx2;
16738 int ElemIdx3;
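// For example (illustrative shift count): Index == 1 yields the mask
// {7, 0, 1, 2} on little-endian targets and {1, 2, 3, 4} on big-endian
// targets, indexing into the concatenated <8 x i32> input.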
16739 if (getTarget().isLittleEndian()) {
16740 // Little endian element N comes from element 8+N-Index of the
16741 // concatenated wide vector (of course, using modulo arithmetic on
16742 // the total number of elements).
16743 ElemIdx0 = (8 - Index) % 8;
16744 ElemIdx1 = (9 - Index) % 8;
16745 ElemIdx2 = (10 - Index) % 8;
16746 ElemIdx3 = (11 - Index) % 8;
16747 } else {
16748 // Big endian ElemIdx<N> = Index + N
16749 ElemIdx0 = Index;
16750 ElemIdx1 = Index + 1;
16751 ElemIdx2 = Index + 2;
16752 ElemIdx3 = Index + 3;
16755 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
16756 Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
16757 QualType BIRetType = E->getType();
16758 auto RetTy = ConvertType(BIRetType);
16759 return Builder.CreateBitCast(ShuffleCall, RetTy);
16762 case PPC::BI__builtin_pack_vector_int128: {
16763 Value *Op0 = EmitScalarExpr(E->getArg(0));
16764 Value *Op1 = EmitScalarExpr(E->getArg(1));
16765 bool isLittleEndian = getTarget().isLittleEndian();
16766 Value *PoisonValue =
16767 llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->getType(), 2));
16768 Value *Res = Builder.CreateInsertElement(
16769 PoisonValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
16770 Res = Builder.CreateInsertElement(Res, Op1,
16771 (uint64_t)(isLittleEndian ? 0 : 1));
16772 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
16775 case PPC::BI__builtin_unpack_vector_int128: {
16776 Value *Op0 = EmitScalarExpr(E->getArg(0));
16777 Value *Op1 = EmitScalarExpr(E->getArg(1));
16778 ConstantInt *Index = cast<ConstantInt>(Op1);
16779 Value *Unpacked = Builder.CreateBitCast(
16780 Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
16782 if (getTarget().isLittleEndian())
16783 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
16785 return Builder.CreateExtractElement(Unpacked, Index);
16788 case PPC::BI__builtin_ppc_sthcx: {
16789 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
16790 Value *Op0 = EmitScalarExpr(E->getArg(0));
16791 Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty);
16792 return Builder.CreateCall(F, {Op0, Op1});
16795 // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
16796 // Some of the MMA instructions accumulate their result into an existing
16797 // accumulator whereas the others generate a new accumulator. So we need to
16798 // use custom code generation to expand such a builtin call into a load
16799 // through the pointer (if the corresponding instruction accumulates its
16800 // result), followed by the call to the intrinsic and a store of the result.
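// For example (illustratively), an accumulating builtin such as
// __builtin_mma_xvf32gerpp(&acc, a, b) becomes a load of acc through the
// pointer, a call to the corresponding ppc_mma intrinsic with the loaded
// accumulator and the vector operands, and an aligned store of the call
// result back through the pointer; non-accumulating forms skip the load.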
16801 #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
16802 case PPC::BI__builtin_##Name:
16803 #include "clang/Basic/BuiltinsPPC.def"
16805 SmallVector<Value *, 4> Ops;
16806 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
16807 if (E->getArg(i)->getType()->isArrayType())
16808 Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
16809 else
16810 Ops.push_back(EmitScalarExpr(E->getArg(i)));
16811 // The first argument of these builtins is a pointer used to store their
16812 // result. However, the LLVM intrinsics return their result in multiple
16813 // return values, so here we emit code that extracts those values from the
16814 // intrinsic results and stores them using that pointer.
16815 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
16816 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
16817 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
16818 unsigned NumVecs = 2;
16819 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
16820 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
16821 NumVecs = 4;
16822 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
16824 llvm::Function *F = CGM.getIntrinsic(Intrinsic);
16825 Address Addr = EmitPointerWithAlignment(E->getArg(1));
16826 Value *Vec = Builder.CreateLoad(Addr);
16827 Value *Call = Builder.CreateCall(F, {Vec});
16828 llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
16829 Value *Ptr = Ops[0];
16830 for (unsigned i=0; i<NumVecs; i++) {
16831 Value *Vec = Builder.CreateExtractValue(Call, i);
16832 llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
16833 Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
16834 Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
16836 return Call;
16838 if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
16839 BuiltinID == PPC::BI__builtin_mma_build_acc) {
16840 // Reverse the order of the operands for LE, so the
16841 // same builtin call can be used on both LE and BE
16842 // without the need for the programmer to swap operands.
16843 // The operands are reversed starting from the second argument;
16844 // the first operand is the pointer to the pair/accumulator
16845 // that is being built.
16846 if (getTarget().isLittleEndian())
16847 std::reverse(Ops.begin() + 1, Ops.end());
16849 bool Accumulate;
16850 switch (BuiltinID) {
16851 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
16852 case PPC::BI__builtin_##Name: \
16853 ID = Intrinsic::ppc_##Intr; \
16854 Accumulate = Acc; \
16855 break;
16856 #include "clang/Basic/BuiltinsPPC.def"
16858 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16859 BuiltinID == PPC::BI__builtin_vsx_stxvp ||
16860 BuiltinID == PPC::BI__builtin_mma_lxvp ||
16861 BuiltinID == PPC::BI__builtin_mma_stxvp) {
16862 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16863 BuiltinID == PPC::BI__builtin_mma_lxvp) {
16864 Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
16865 } else {
16866 Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
16868 Ops.pop_back();
16869 llvm::Function *F = CGM.getIntrinsic(ID);
16870 return Builder.CreateCall(F, Ops, "");
16872 SmallVector<Value*, 4> CallOps;
16873 if (Accumulate) {
16874 Address Addr = EmitPointerWithAlignment(E->getArg(0));
16875 Value *Acc = Builder.CreateLoad(Addr);
16876 CallOps.push_back(Acc);
16878 for (unsigned i=1; i<Ops.size(); i++)
16879 CallOps.push_back(Ops[i]);
16880 llvm::Function *F = CGM.getIntrinsic(ID);
16881 Value *Call = Builder.CreateCall(F, CallOps);
16882 return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
16885 case PPC::BI__builtin_ppc_compare_and_swap:
16886 case PPC::BI__builtin_ppc_compare_and_swaplp: {
16887 Address Addr = EmitPointerWithAlignment(E->getArg(0));
16888 Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
16889 Value *OldVal = Builder.CreateLoad(OldValAddr);
16890 QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
16891 LValue LV = MakeAddrLValue(Addr, AtomicTy);
16892 Value *Op2 = EmitScalarExpr(E->getArg(2));
16893 auto Pair = EmitAtomicCompareExchange(
16894 LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(),
16895 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
16896 // Unlike C11's atomic_compare_exchange, according to
16897 // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
16898 // > In either case, the contents of the memory location specified by addr
16899 // > are copied into the memory location specified by old_val_addr.
16900 // However, the documentation does not say whether the store to OldValAddr
16901 // is atomic or which ordering to use. Following XL's codegen, treat it as
16902 // a normal store.
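// Illustratively: __builtin_ppc_compare_and_swap(&val, &old_val, new_val)
// compares val against old_val, stores new_val on a match, returns 1 if the
// exchange happened and 0 otherwise, and in either case writes the value
// loaded from val back into old_val.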
16903 Value *LoadedVal = Pair.first.getScalarVal();
16904 Builder.CreateStore(LoadedVal, OldValAddr);
16905 return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
16907 case PPC::BI__builtin_ppc_fetch_and_add:
16908 case PPC::BI__builtin_ppc_fetch_and_addlp: {
16909 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
16910 llvm::AtomicOrdering::Monotonic);
16912 case PPC::BI__builtin_ppc_fetch_and_and:
16913 case PPC::BI__builtin_ppc_fetch_and_andlp: {
16914 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
16915 llvm::AtomicOrdering::Monotonic);
16918 case PPC::BI__builtin_ppc_fetch_and_or:
16919 case PPC::BI__builtin_ppc_fetch_and_orlp: {
16920 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
16921 llvm::AtomicOrdering::Monotonic);
16923 case PPC::BI__builtin_ppc_fetch_and_swap:
16924 case PPC::BI__builtin_ppc_fetch_and_swaplp: {
16925 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
16926 llvm::AtomicOrdering::Monotonic);
16928 case PPC::BI__builtin_ppc_ldarx:
16929 case PPC::BI__builtin_ppc_lwarx:
16930 case PPC::BI__builtin_ppc_lharx:
16931 case PPC::BI__builtin_ppc_lbarx:
16932 return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
16933 case PPC::BI__builtin_ppc_mfspr: {
16934 Value *Op0 = EmitScalarExpr(E->getArg(0));
16935 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16936 ? Int32Ty
16937 : Int64Ty;
16938 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
16939 return Builder.CreateCall(F, {Op0});
16941 case PPC::BI__builtin_ppc_mtspr: {
16942 Value *Op0 = EmitScalarExpr(E->getArg(0));
16943 Value *Op1 = EmitScalarExpr(E->getArg(1));
16944 llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16945 ? Int32Ty
16946 : Int64Ty;
16947 Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
16948 return Builder.CreateCall(F, {Op0, Op1});
16950 case PPC::BI__builtin_ppc_popcntb: {
16951 Value *ArgValue = EmitScalarExpr(E->getArg(0));
16952 llvm::Type *ArgType = ArgValue->getType();
16953 Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
16954 return Builder.CreateCall(F, {ArgValue}, "popcntb");
16956 case PPC::BI__builtin_ppc_mtfsf: {
16957 // The builtin takes a uint32 that needs to be cast to an
16958 // f64 to be passed to the intrinsic.
16959 Value *Op0 = EmitScalarExpr(E->getArg(0));
16960 Value *Op1 = EmitScalarExpr(E->getArg(1));
16961 Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy);
16962 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
16963 return Builder.CreateCall(F, {Op0, Cast}, "");
16966 case PPC::BI__builtin_ppc_swdiv_nochk:
16967 case PPC::BI__builtin_ppc_swdivs_nochk: {
16968 Value *Op0 = EmitScalarExpr(E->getArg(0));
16969 Value *Op1 = EmitScalarExpr(E->getArg(1));
16970 FastMathFlags FMF = Builder.getFastMathFlags();
16971 Builder.getFastMathFlags().setFast();
16972 Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk");
16973 Builder.getFastMathFlags() &= (FMF);
16974 return FDiv;
16976 case PPC::BI__builtin_ppc_fric:
16977 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16978 *this, E, Intrinsic::rint,
16979 Intrinsic::experimental_constrained_rint))
16980 .getScalarVal();
16981 case PPC::BI__builtin_ppc_frim:
16982 case PPC::BI__builtin_ppc_frims:
16983 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16984 *this, E, Intrinsic::floor,
16985 Intrinsic::experimental_constrained_floor))
16986 .getScalarVal();
16987 case PPC::BI__builtin_ppc_frin:
16988 case PPC::BI__builtin_ppc_frins:
16989 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16990 *this, E, Intrinsic::round,
16991 Intrinsic::experimental_constrained_round))
16992 .getScalarVal();
16993 case PPC::BI__builtin_ppc_frip:
16994 case PPC::BI__builtin_ppc_frips:
16995 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16996 *this, E, Intrinsic::ceil,
16997 Intrinsic::experimental_constrained_ceil))
16998 .getScalarVal();
16999 case PPC::BI__builtin_ppc_friz:
17000 case PPC::BI__builtin_ppc_frizs:
17001 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17002 *this, E, Intrinsic::trunc,
17003 Intrinsic::experimental_constrained_trunc))
17004 .getScalarVal();
17005 case PPC::BI__builtin_ppc_fsqrt:
17006 case PPC::BI__builtin_ppc_fsqrts:
17007 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
17008 *this, E, Intrinsic::sqrt,
17009 Intrinsic::experimental_constrained_sqrt))
17010 .getScalarVal();
17011 case PPC::BI__builtin_ppc_test_data_class: {
17012 Value *Op0 = EmitScalarExpr(E->getArg(0));
17013 Value *Op1 = EmitScalarExpr(E->getArg(1));
17014 return Builder.CreateCall(
17015 CGM.getIntrinsic(Intrinsic::ppc_test_data_class, Op0->getType()),
17016 {Op0, Op1}, "test_data_class");
17018 case PPC::BI__builtin_ppc_maxfe: {
17019 Value *Op0 = EmitScalarExpr(E->getArg(0));
17020 Value *Op1 = EmitScalarExpr(E->getArg(1));
17021 Value *Op2 = EmitScalarExpr(E->getArg(2));
17022 Value *Op3 = EmitScalarExpr(E->getArg(3));
17023 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe),
17024 {Op0, Op1, Op2, Op3});
17026 case PPC::BI__builtin_ppc_maxfl: {
17027 Value *Op0 = EmitScalarExpr(E->getArg(0));
17028 Value *Op1 = EmitScalarExpr(E->getArg(1));
17029 Value *Op2 = EmitScalarExpr(E->getArg(2));
17030 Value *Op3 = EmitScalarExpr(E->getArg(3));
17031 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl),
17032 {Op0, Op1, Op2, Op3});
17034 case PPC::BI__builtin_ppc_maxfs: {
17035 Value *Op0 = EmitScalarExpr(E->getArg(0));
17036 Value *Op1 = EmitScalarExpr(E->getArg(1));
17037 Value *Op2 = EmitScalarExpr(E->getArg(2));
17038 Value *Op3 = EmitScalarExpr(E->getArg(3));
17039 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs),
17040 {Op0, Op1, Op2, Op3});
17042 case PPC::BI__builtin_ppc_minfe: {
17043 Value *Op0 = EmitScalarExpr(E->getArg(0));
17044 Value *Op1 = EmitScalarExpr(E->getArg(1));
17045 Value *Op2 = EmitScalarExpr(E->getArg(2));
17046 Value *Op3 = EmitScalarExpr(E->getArg(3));
17047 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe),
17048 {Op0, Op1, Op2, Op3});
17050 case PPC::BI__builtin_ppc_minfl: {
17051 Value *Op0 = EmitScalarExpr(E->getArg(0));
17052 Value *Op1 = EmitScalarExpr(E->getArg(1));
17053 Value *Op2 = EmitScalarExpr(E->getArg(2));
17054 Value *Op3 = EmitScalarExpr(E->getArg(3));
17055 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl),
17056 {Op0, Op1, Op2, Op3});
17058 case PPC::BI__builtin_ppc_minfs: {
17059 Value *Op0 = EmitScalarExpr(E->getArg(0));
17060 Value *Op1 = EmitScalarExpr(E->getArg(1));
17061 Value *Op2 = EmitScalarExpr(E->getArg(2));
17062 Value *Op3 = EmitScalarExpr(E->getArg(3));
17063 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs),
17064 {Op0, Op1, Op2, Op3});
17066 case PPC::BI__builtin_ppc_swdiv:
17067 case PPC::BI__builtin_ppc_swdivs: {
17068 Value *Op0 = EmitScalarExpr(E->getArg(0));
17069 Value *Op1 = EmitScalarExpr(E->getArg(1));
17070 return Builder.CreateFDiv(Op0, Op1, "swdiv");
17075 namespace {
17076 // If \p E is not a null pointer, insert an address space cast to match the
17077 // return type of \p E if necessary.
17078 Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
17079 const CallExpr *E = nullptr) {
17080 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
17081 auto *Call = CGF.Builder.CreateCall(F);
17082 Call->addRetAttr(
17083 Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
17084 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
17085 if (!E)
17086 return Call;
17087 QualType BuiltinRetType = E->getType();
17088 auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
17089 if (RetTy == Call->getType())
17090 return Call;
17091 return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
17094 Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
17095 auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
17096 auto *Call = CGF.Builder.CreateCall(F);
17097 Call->addRetAttr(
17098 Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
17099 Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
17100 return Call;
17103 // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
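// For example (illustratively), Index == 1 loads a 16-bit value from byte
// offset 12 + 1 * 2 == 14 of the implicit kernarg segment under code object
// v5, or from offset 4 + 1 * 2 == 6 of the HSA kernel_dispatch_packet (its
// workgroup_size_y field) otherwise.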
17104 Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
17105 bool IsCOV_5 = CGF.getTarget().getTargetOpts().CodeObjectVersion ==
17106 clang::TargetOptions::COV_5;
17107 Constant *Offset;
17108 Value *DP;
17109 if (IsCOV_5) {
17110 // Indexing the implicit kernarg segment.
17111 Offset = llvm::ConstantInt::get(CGF.Int32Ty, 12 + Index * 2);
17112 DP = EmitAMDGPUImplicitArgPtr(CGF);
17113 } else {
17114 // Indexing the HSA kernel_dispatch_packet struct.
17115 Offset = llvm::ConstantInt::get(CGF.Int32Ty, 4 + Index * 2);
17116 DP = EmitAMDGPUDispatchPtr(CGF);
17119 auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
17120 auto *LD = CGF.Builder.CreateLoad(
17121 Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
17122 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
17123 llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
17124 APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
17125 LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
17126 LD->setMetadata(llvm::LLVMContext::MD_noundef,
17127 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17128 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
17129 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17130 return LD;
17133 // \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
17134 Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
17135 const unsigned XOffset = 12;
17136 auto *DP = EmitAMDGPUDispatchPtr(CGF);
17137 // Indexing the HSA kernel_dispatch_packet struct.
17138 auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
17139 auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
17140 auto *LD = CGF.Builder.CreateLoad(
17141 Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
17142 LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
17143 llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
17144 return LD;
17146 } // namespace
17148 // Process the memory ordering and memory scope arguments of various
17149 // amdgcn builtins.
17150 // \p Order takes a C++11 compatible memory-ordering specifier and converts
17151 // it into LLVM's memory ordering specifier using the atomic C ABI, and
17152 // writes it to \p AO. \p Scope takes a const char * and converts it into an
17153 // AMDGCN-specific SyncScopeID and writes it to \p SSID.
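// For example (illustratively), __builtin_amdgcn_fence(__ATOMIC_ACQUIRE,
// "workgroup") yields AO == AtomicOrdering::Acquire and SSID set to the
// "workgroup" sync scope returned by getOrInsertSyncScopeID.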
17154 void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
17155 llvm::AtomicOrdering &AO,
17156 llvm::SyncScope::ID &SSID) {
17157 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
17159 // Map C11/C++11 memory ordering to LLVM memory ordering
17160 assert(llvm::isValidAtomicOrderingCABI(ord));
17161 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
17162 case llvm::AtomicOrderingCABI::acquire:
17163 case llvm::AtomicOrderingCABI::consume:
17164 AO = llvm::AtomicOrdering::Acquire;
17165 break;
17166 case llvm::AtomicOrderingCABI::release:
17167 AO = llvm::AtomicOrdering::Release;
17168 break;
17169 case llvm::AtomicOrderingCABI::acq_rel:
17170 AO = llvm::AtomicOrdering::AcquireRelease;
17171 break;
17172 case llvm::AtomicOrderingCABI::seq_cst:
17173 AO = llvm::AtomicOrdering::SequentiallyConsistent;
17174 break;
17175 case llvm::AtomicOrderingCABI::relaxed:
17176 AO = llvm::AtomicOrdering::Monotonic;
17177 break;
17180 StringRef scp;
17181 llvm::getConstantStringInfo(Scope, scp);
17182 SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
17185 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
17186 const CallExpr *E) {
17187 llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
17188 llvm::SyncScope::ID SSID;
17189 switch (BuiltinID) {
17190 case AMDGPU::BI__builtin_amdgcn_div_scale:
17191 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
17192 // Translate from the intrinsic's struct return to the builtin's out
17193 // argument.
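// Illustratively: the first three operands are passed to
// llvm.amdgcn.div.scale, which returns a {value, i1} pair; the i1 flag is
// zero-extended and stored through the fourth (pointer) argument, and the
// value is returned.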
17195 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
17197 llvm::Value *X = EmitScalarExpr(E->getArg(0));
17198 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
17199 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
17201 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
17202 X->getType());
17204 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
17206 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
17207 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
17209 llvm::Type *RealFlagType = FlagOutPtr.getElementType();
17211 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
17212 Builder.CreateStore(FlagExt, FlagOutPtr);
17213 return Result;
17215 case AMDGPU::BI__builtin_amdgcn_div_fmas:
17216 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
17217 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17218 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17219 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17220 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
17222 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
17223 Src0->getType());
17224 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
17225 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
17228 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
17229 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
17230 case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
17231 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
17232 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
17233 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
17234 llvm::SmallVector<llvm::Value *, 6> Args;
17235 for (unsigned I = 0; I != E->getNumArgs(); ++I)
17236 Args.push_back(EmitScalarExpr(E->getArg(I)));
17237 assert(Args.size() == 5 || Args.size() == 6);
17238 if (Args.size() == 5)
17239 Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
17240 Function *F =
17241 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
17242 return Builder.CreateCall(F, Args);
17244 case AMDGPU::BI__builtin_amdgcn_div_fixup:
17245 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
17246 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
17247 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
17248 case AMDGPU::BI__builtin_amdgcn_trig_preop:
17249 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
17250 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
17251 case AMDGPU::BI__builtin_amdgcn_rcp:
17252 case AMDGPU::BI__builtin_amdgcn_rcpf:
17253 case AMDGPU::BI__builtin_amdgcn_rcph:
17254 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
17255 case AMDGPU::BI__builtin_amdgcn_sqrt:
17256 case AMDGPU::BI__builtin_amdgcn_sqrtf:
17257 case AMDGPU::BI__builtin_amdgcn_sqrth:
17258 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
17259 case AMDGPU::BI__builtin_amdgcn_rsq:
17260 case AMDGPU::BI__builtin_amdgcn_rsqf:
17261 case AMDGPU::BI__builtin_amdgcn_rsqh:
17262 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
17263 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
17264 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
17265 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
17266 case AMDGPU::BI__builtin_amdgcn_sinf:
17267 case AMDGPU::BI__builtin_amdgcn_sinh:
17268 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
17269 case AMDGPU::BI__builtin_amdgcn_cosf:
17270 case AMDGPU::BI__builtin_amdgcn_cosh:
17271 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
17272 case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
17273 return EmitAMDGPUDispatchPtr(*this, E);
17274 case AMDGPU::BI__builtin_amdgcn_logf:
17275 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
17276 case AMDGPU::BI__builtin_amdgcn_exp2f:
17277 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
17278 case AMDGPU::BI__builtin_amdgcn_log_clampf:
17279 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
17280 case AMDGPU::BI__builtin_amdgcn_ldexp:
17281 case AMDGPU::BI__builtin_amdgcn_ldexpf:
17282 case AMDGPU::BI__builtin_amdgcn_ldexph: {
17283 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17284 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17285 llvm::Function *F =
17286 CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
17287 return Builder.CreateCall(F, {Src0, Src1});
17289 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
17290 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
17291 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
17292 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
17293 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
17294 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
17295 Value *Src0 = EmitScalarExpr(E->getArg(0));
17296 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
17297 { Builder.getInt32Ty(), Src0->getType() });
17298 return Builder.CreateCall(F, Src0);
17300 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
17301 Value *Src0 = EmitScalarExpr(E->getArg(0));
17302 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
17303 { Builder.getInt16Ty(), Src0->getType() });
17304 return Builder.CreateCall(F, Src0);
17306 case AMDGPU::BI__builtin_amdgcn_fract:
17307 case AMDGPU::BI__builtin_amdgcn_fractf:
17308 case AMDGPU::BI__builtin_amdgcn_fracth:
17309 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
17310 case AMDGPU::BI__builtin_amdgcn_lerp:
17311 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
17312 case AMDGPU::BI__builtin_amdgcn_ubfe:
17313 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
17314 case AMDGPU::BI__builtin_amdgcn_sbfe:
17315 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
17316 case AMDGPU::BI__builtin_amdgcn_ballot_w32:
17317 case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
17318 llvm::Type *ResultType = ConvertType(E->getType());
17319 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
17320 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
17321 return Builder.CreateCall(F, { Src });
17323 case AMDGPU::BI__builtin_amdgcn_uicmp:
17324 case AMDGPU::BI__builtin_amdgcn_uicmpl:
17325 case AMDGPU::BI__builtin_amdgcn_sicmp:
17326 case AMDGPU::BI__builtin_amdgcn_sicmpl: {
17327 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17328 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17329 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17331 // FIXME-GFX10: How should 32 bit mask be handled?
17332 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
17333 { Builder.getInt64Ty(), Src0->getType() });
17334 return Builder.CreateCall(F, { Src0, Src1, Src2 });
17336 case AMDGPU::BI__builtin_amdgcn_fcmp:
17337 case AMDGPU::BI__builtin_amdgcn_fcmpf: {
17338 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17339 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17340 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17342 // FIXME-GFX10: How should 32 bit mask be handled?
17343 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
17344 { Builder.getInt64Ty(), Src0->getType() });
17345 return Builder.CreateCall(F, { Src0, Src1, Src2 });
17347 case AMDGPU::BI__builtin_amdgcn_class:
17348 case AMDGPU::BI__builtin_amdgcn_classf:
17349 case AMDGPU::BI__builtin_amdgcn_classh:
17350 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
17351 case AMDGPU::BI__builtin_amdgcn_fmed3f:
17352 case AMDGPU::BI__builtin_amdgcn_fmed3h:
17353 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
17354 case AMDGPU::BI__builtin_amdgcn_ds_append:
17355 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
17356 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
17357 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
17358 Value *Src0 = EmitScalarExpr(E->getArg(0));
17359 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
17360 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
17362 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
17363 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
17364 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
17365 Intrinsic::ID Intrin;
17366 switch (BuiltinID) {
17367 case AMDGPU::BI__builtin_amdgcn_ds_faddf:
17368 Intrin = Intrinsic::amdgcn_ds_fadd;
17369 break;
17370 case AMDGPU::BI__builtin_amdgcn_ds_fminf:
17371 Intrin = Intrinsic::amdgcn_ds_fmin;
17372 break;
17373 case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
17374 Intrin = Intrinsic::amdgcn_ds_fmax;
17375 break;
17377 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17378 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17379 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17380 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
17381 llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
17382 llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
17383 llvm::FunctionType *FTy = F->getFunctionType();
17384 llvm::Type *PTy = FTy->getParamType(0);
17385 Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
17386 return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
17388 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
17389 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
17390 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
17391 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
17392 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
17393 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
17394 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
17395 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
17396 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
17397 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: {
17398 Intrinsic::ID IID;
17399 llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
17400 switch (BuiltinID) {
17401 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
17402 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17403 IID = Intrinsic::amdgcn_global_atomic_fadd;
17404 break;
17405 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
17406 ArgTy = llvm::FixedVectorType::get(
17407 llvm::Type::getHalfTy(getLLVMContext()), 2);
17408 IID = Intrinsic::amdgcn_global_atomic_fadd;
17409 break;
17410 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
17411 IID = Intrinsic::amdgcn_global_atomic_fadd;
17412 break;
17413 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
17414 IID = Intrinsic::amdgcn_global_atomic_fmin;
17415 break;
17416 case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
17417 IID = Intrinsic::amdgcn_global_atomic_fmax;
17418 break;
17419 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
17420 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17421 break;
17422 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
17423 IID = Intrinsic::amdgcn_flat_atomic_fmin;
17424 break;
17425 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
17426 IID = Intrinsic::amdgcn_flat_atomic_fmax;
17427 break;
17428 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
17429 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17430 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17431 break;
17432 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
17433 ArgTy = llvm::FixedVectorType::get(
17434 llvm::Type::getHalfTy(getLLVMContext()), 2);
17435 IID = Intrinsic::amdgcn_flat_atomic_fadd;
17436 break;
17438 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17439 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17440 llvm::Function *F =
17441 CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
17442 return Builder.CreateCall(F, {Addr, Val});
17444 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
17445 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
17446 Intrinsic::ID IID;
17447 switch (BuiltinID) {
17448 case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
17449 IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
17450 break;
17451 case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
17452 IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
17453 break;
17455 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17456 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17457 llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
17458 return Builder.CreateCall(F, {Addr, Val});
17460 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
17461 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
17462 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
17463 Intrinsic::ID IID;
17464 llvm::Type *ArgTy;
17465 switch (BuiltinID) {
17466 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
17467 ArgTy = llvm::Type::getFloatTy(getLLVMContext());
17468 IID = Intrinsic::amdgcn_ds_fadd;
17469 break;
17470 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
17471 ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
17472 IID = Intrinsic::amdgcn_ds_fadd;
17473 break;
17474 case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
17475 ArgTy = llvm::FixedVectorType::get(
17476 llvm::Type::getHalfTy(getLLVMContext()), 2);
17477 IID = Intrinsic::amdgcn_ds_fadd;
17478 break;
17480 llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
17481 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
17482 llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
17483 llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
17484 llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
17485 llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
17486 llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
17487 return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
17489 case AMDGPU::BI__builtin_amdgcn_read_exec:
17490 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
17491 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
17492 return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty);
17494 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
17495 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
17496 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
17497 case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
17498 llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
17499 llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
17500 llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
17501 llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
17502 llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
17503 llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
17505 // The builtins take these arguments as vec4 where the last element is
17506 // ignored. The intrinsic takes them as vec3.
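  // For example (illustrative only): a <4 x float> ray-origin argument is
  // narrowed to <3 x float> by the <0, 1, 2> shuffle masks built below,
  // dropping the ignored fourth lane before the intrinsic call.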
17507 RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
17508 ArrayRef<int>{0, 1, 2});
17509 RayDir =
17510 Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
17511 RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
17512 ArrayRef<int>{0, 1, 2});
17514 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
17515 {NodePtr->getType(), RayDir->getType()});
17516 return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
17517 RayInverseDir, TextureDescr});
17520 case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn: {
17521 SmallVector<Value *, 4> Args;
17522 for (int i = 0, e = E->getNumArgs(); i != e; ++i)
17523 Args.push_back(EmitScalarExpr(E->getArg(i)));
17525 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ds_bvh_stack_rtn);
17526 Value *Call = Builder.CreateCall(F, Args);
17527 Value *Rtn = Builder.CreateExtractValue(Call, 0);
17528 Value *A = Builder.CreateExtractValue(Call, 1);
17529 llvm::Type *RetTy = ConvertType(E->getType());
17530 Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
17531 (uint64_t)0);
17532 return Builder.CreateInsertElement(I0, A, 1);
17535 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
17536 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
17537 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
17538 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
17539 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
17540 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
17541 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
17542 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
17543 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
17544 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
17545 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
17546 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: {
17548 // These operations perform a matrix multiplication and accumulation of
17549 // the form:
17550 // D = A * B + C
17551 // The return type always matches the type of matrix C.
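  // Illustrative sketch (operand widths are approximate and depend on the
  // wave size and gfx target): a source-level call such as
  //   d = __builtin_amdgcn_wmma_f32_16x16x16_f16_w32(a, b, c);
  // lowers to the amdgcn.wmma.f32.16x16x16.f16 intrinsic, which is mangled on
  // the type of the C operand chosen via ArgForMatchingRetType below.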
17552 unsigned ArgForMatchingRetType;
17553 unsigned BuiltinWMMAOp;
17555 switch (BuiltinID) {
17556 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
17557 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
17558 ArgForMatchingRetType = 2;
17559 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
17560 break;
17561 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
17562 case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
17563 ArgForMatchingRetType = 2;
17564 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
17565 break;
17566 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
17567 case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
17568 ArgForMatchingRetType = 2;
17569 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
17570 break;
17571 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
17572 case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
17573 ArgForMatchingRetType = 2;
17574 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
17575 break;
17576 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
17577 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
17578 ArgForMatchingRetType = 4;
17579 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
17580 break;
17581 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
17582 case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
17583 ArgForMatchingRetType = 4;
17584 BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
17585 break;
17588 SmallVector<Value *, 6> Args;
17589 for (int i = 0, e = E->getNumArgs(); i != e; ++i)
17590 Args.push_back(EmitScalarExpr(E->getArg(i)));
17592 Function *F = CGM.getIntrinsic(BuiltinWMMAOp,
17593 {Args[ArgForMatchingRetType]->getType()});
17595 return Builder.CreateCall(F, Args);
17598 // amdgcn workitem
17599 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
17600 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
17601 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
17602 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
17603 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
17604 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
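  // emitRangedBuiltin attaches range metadata to the call; illustratively,
  //   %id = call i32 @llvm.amdgcn.workitem.id.x(), !range !{i32 0, i32 1024}
  // which tells optimizers the result lies in [0, 1024).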
17606 // amdgcn workgroup size
17607 case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
17608 return EmitAMDGPUWorkGroupSize(*this, 0);
17609 case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
17610 return EmitAMDGPUWorkGroupSize(*this, 1);
17611 case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
17612 return EmitAMDGPUWorkGroupSize(*this, 2);
17614 // amdgcn grid size
17615 case AMDGPU::BI__builtin_amdgcn_grid_size_x:
17616 return EmitAMDGPUGridSize(*this, 0);
17617 case AMDGPU::BI__builtin_amdgcn_grid_size_y:
17618 return EmitAMDGPUGridSize(*this, 1);
17619 case AMDGPU::BI__builtin_amdgcn_grid_size_z:
17620 return EmitAMDGPUGridSize(*this, 2);
17622 // r600 intrinsics
17623 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
17624 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
17625 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
17626 case AMDGPU::BI__builtin_r600_read_tidig_x:
17627 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
17628 case AMDGPU::BI__builtin_r600_read_tidig_y:
17629 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
17630 case AMDGPU::BI__builtin_r600_read_tidig_z:
17631 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
17632 case AMDGPU::BI__builtin_amdgcn_alignbit: {
17633 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
17634 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
17635 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
17636 Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
17637 return Builder.CreateCall(F, { Src0, Src1, Src2 });
17639 case AMDGPU::BI__builtin_amdgcn_fence: {
17640 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
17641 EmitScalarExpr(E->getArg(1)), AO, SSID);
17642 return Builder.CreateFence(AO, SSID);
17644 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
17645 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
17646 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
17647 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
17648 llvm::AtomicRMWInst::BinOp BinOp;
17649 switch (BuiltinID) {
17650 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
17651 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
17652 BinOp = llvm::AtomicRMWInst::UIncWrap;
17653 break;
17654 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
17655 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
17656 BinOp = llvm::AtomicRMWInst::UDecWrap;
17657 break;
17660 Value *Ptr = EmitScalarExpr(E->getArg(0));
17661 Value *Val = EmitScalarExpr(E->getArg(1));
17663 ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
17664 EmitScalarExpr(E->getArg(3)), AO, SSID);
17666 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
17667 bool Volatile =
17668 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
17670 llvm::AtomicRMWInst *RMW =
17671 Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
17672 if (Volatile)
17673 RMW->setVolatile(true);
17674 return RMW;
17676 case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
17677 case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
17678 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
17679 llvm::Type *ResultType = ConvertType(E->getType());
17680 // s_sendmsg_rtn is mangled using return type only.
17681 Function *F =
17682 CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
17683 return Builder.CreateCall(F, {Arg});
17685 default:
17686 return nullptr;
17690 /// Handle a SystemZ function in which the final argument is a pointer
17691 /// to an int that receives the post-instruction CC value. At the LLVM level
17692 /// this is represented as a function that returns a {result, cc} pair.
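/// Illustrative example: __builtin_s390_vceqbs(a, b, &cc) is emitted as a call
/// to the llvm.s390.vceqbs intrinsic, which returns a {result, cc} pair;
/// element 1 is stored through the cc pointer and element 0 becomes the
/// builtin's result.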
17693 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
17694 unsigned IntrinsicID,
17695 const CallExpr *E) {
17696 unsigned NumArgs = E->getNumArgs() - 1;
17697 SmallVector<Value *, 8> Args(NumArgs);
17698 for (unsigned I = 0; I < NumArgs; ++I)
17699 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
17700 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
17701 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
17702 Value *Call = CGF.Builder.CreateCall(F, Args);
17703 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
17704 CGF.Builder.CreateStore(CC, CCPtr);
17705 return CGF.Builder.CreateExtractValue(Call, 0);
17708 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
17709 const CallExpr *E) {
17710 switch (BuiltinID) {
17711 case SystemZ::BI__builtin_tbegin: {
17712 Value *TDB = EmitScalarExpr(E->getArg(0));
17713 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
17714 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
17715 return Builder.CreateCall(F, {TDB, Control});
17717 case SystemZ::BI__builtin_tbegin_nofloat: {
17718 Value *TDB = EmitScalarExpr(E->getArg(0));
17719 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
17720 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
17721 return Builder.CreateCall(F, {TDB, Control});
17723 case SystemZ::BI__builtin_tbeginc: {
17724 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
17725 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
17726 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
17727 return Builder.CreateCall(F, {TDB, Control});
17729 case SystemZ::BI__builtin_tabort: {
17730 Value *Data = EmitScalarExpr(E->getArg(0));
17731 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
17732 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
17734 case SystemZ::BI__builtin_non_tx_store: {
17735 Value *Address = EmitScalarExpr(E->getArg(0));
17736 Value *Data = EmitScalarExpr(E->getArg(1));
17737 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
17738 return Builder.CreateCall(F, {Data, Address});
17741 // Vector builtins. Note that most vector builtins are mapped automatically
17742 // to target-specific LLVM intrinsics. The ones handled specially here can
17743 // be represented via standard LLVM IR, which is preferable because it
17744 // enables common LLVM optimizations.
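  // For instance, __builtin_s390_vpopctb is emitted below as a generic
  // llvm.ctpop call on the vector type instead of a target-specific intrinsic.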
17746 case SystemZ::BI__builtin_s390_vpopctb:
17747 case SystemZ::BI__builtin_s390_vpopcth:
17748 case SystemZ::BI__builtin_s390_vpopctf:
17749 case SystemZ::BI__builtin_s390_vpopctg: {
17750 llvm::Type *ResultType = ConvertType(E->getType());
17751 Value *X = EmitScalarExpr(E->getArg(0));
17752 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
17753 return Builder.CreateCall(F, X);
17756 case SystemZ::BI__builtin_s390_vclzb:
17757 case SystemZ::BI__builtin_s390_vclzh:
17758 case SystemZ::BI__builtin_s390_vclzf:
17759 case SystemZ::BI__builtin_s390_vclzg: {
17760 llvm::Type *ResultType = ConvertType(E->getType());
17761 Value *X = EmitScalarExpr(E->getArg(0));
17762 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
17763 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
17764 return Builder.CreateCall(F, {X, Undef});
17767 case SystemZ::BI__builtin_s390_vctzb:
17768 case SystemZ::BI__builtin_s390_vctzh:
17769 case SystemZ::BI__builtin_s390_vctzf:
17770 case SystemZ::BI__builtin_s390_vctzg: {
17771 llvm::Type *ResultType = ConvertType(E->getType());
17772 Value *X = EmitScalarExpr(E->getArg(0));
17773 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
17774 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
17775 return Builder.CreateCall(F, {X, Undef});
17778 case SystemZ::BI__builtin_s390_vfsqsb:
17779 case SystemZ::BI__builtin_s390_vfsqdb: {
17780 llvm::Type *ResultType = ConvertType(E->getType());
17781 Value *X = EmitScalarExpr(E->getArg(0));
17782 if (Builder.getIsFPConstrained()) {
17783 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType);
17784 return Builder.CreateConstrainedFPCall(F, { X });
17785 } else {
17786 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
17787 return Builder.CreateCall(F, X);
17790 case SystemZ::BI__builtin_s390_vfmasb:
17791 case SystemZ::BI__builtin_s390_vfmadb: {
17792 llvm::Type *ResultType = ConvertType(E->getType());
17793 Value *X = EmitScalarExpr(E->getArg(0));
17794 Value *Y = EmitScalarExpr(E->getArg(1));
17795 Value *Z = EmitScalarExpr(E->getArg(2));
17796 if (Builder.getIsFPConstrained()) {
17797 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
17798 return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
17799 } else {
17800 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
17801 return Builder.CreateCall(F, {X, Y, Z});
17804 case SystemZ::BI__builtin_s390_vfmssb:
17805 case SystemZ::BI__builtin_s390_vfmsdb: {
17806 llvm::Type *ResultType = ConvertType(E->getType());
17807 Value *X = EmitScalarExpr(E->getArg(0));
17808 Value *Y = EmitScalarExpr(E->getArg(1));
17809 Value *Z = EmitScalarExpr(E->getArg(2));
17810 if (Builder.getIsFPConstrained()) {
17811 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
17812 return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
17813 } else {
17814 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
17815 return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
17818 case SystemZ::BI__builtin_s390_vfnmasb:
17819 case SystemZ::BI__builtin_s390_vfnmadb: {
17820 llvm::Type *ResultType = ConvertType(E->getType());
17821 Value *X = EmitScalarExpr(E->getArg(0));
17822 Value *Y = EmitScalarExpr(E->getArg(1));
17823 Value *Z = EmitScalarExpr(E->getArg(2));
17824 if (Builder.getIsFPConstrained()) {
17825 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
17826 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
17827 } else {
17828 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
17829 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
17832 case SystemZ::BI__builtin_s390_vfnmssb:
17833 case SystemZ::BI__builtin_s390_vfnmsdb: {
17834 llvm::Type *ResultType = ConvertType(E->getType());
17835 Value *X = EmitScalarExpr(E->getArg(0));
17836 Value *Y = EmitScalarExpr(E->getArg(1));
17837 Value *Z = EmitScalarExpr(E->getArg(2));
17838 if (Builder.getIsFPConstrained()) {
17839 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
17840 Value *NegZ = Builder.CreateFNeg(Z, "sub");
17841 return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}));
17842 } else {
17843 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
17844 Value *NegZ = Builder.CreateFNeg(Z, "neg");
17845 return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}));
17848 case SystemZ::BI__builtin_s390_vflpsb:
17849 case SystemZ::BI__builtin_s390_vflpdb: {
17850 llvm::Type *ResultType = ConvertType(E->getType());
17851 Value *X = EmitScalarExpr(E->getArg(0));
17852 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
17853 return Builder.CreateCall(F, X);
17855 case SystemZ::BI__builtin_s390_vflnsb:
17856 case SystemZ::BI__builtin_s390_vflndb: {
17857 llvm::Type *ResultType = ConvertType(E->getType());
17858 Value *X = EmitScalarExpr(E->getArg(0));
17859 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
17860 return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
17862 case SystemZ::BI__builtin_s390_vfisb:
17863 case SystemZ::BI__builtin_s390_vfidb: {
17864 llvm::Type *ResultType = ConvertType(E->getType());
17865 Value *X = EmitScalarExpr(E->getArg(0));
17866 // Constant-fold the M4 and M5 mask arguments.
17867 llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
17868 llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
17869 // Check whether this instance can be represented via an LLVM standard
17870 // intrinsic. We only support some combinations of M4 and M5.
17871 Intrinsic::ID ID = Intrinsic::not_intrinsic;
17872 Intrinsic::ID CI;
17873 switch (M4.getZExtValue()) {
17874 default: break;
17875 case 0: // IEEE-inexact exception allowed
17876 switch (M5.getZExtValue()) {
17877 default: break;
17878 case 0: ID = Intrinsic::rint;
17879 CI = Intrinsic::experimental_constrained_rint; break;
17881 break;
17882 case 4: // IEEE-inexact exception suppressed
17883 switch (M5.getZExtValue()) {
17884 default: break;
17885 case 0: ID = Intrinsic::nearbyint;
17886 CI = Intrinsic::experimental_constrained_nearbyint; break;
17887 case 1: ID = Intrinsic::round;
17888 CI = Intrinsic::experimental_constrained_round; break;
17889 case 5: ID = Intrinsic::trunc;
17890 CI = Intrinsic::experimental_constrained_trunc; break;
17891 case 6: ID = Intrinsic::ceil;
17892 CI = Intrinsic::experimental_constrained_ceil; break;
17893 case 7: ID = Intrinsic::floor;
17894 CI = Intrinsic::experimental_constrained_floor; break;
17896 break;
17898 if (ID != Intrinsic::not_intrinsic) {
17899 if (Builder.getIsFPConstrained()) {
17900 Function *F = CGM.getIntrinsic(CI, ResultType);
17901 return Builder.CreateConstrainedFPCall(F, X);
17902 } else {
17903 Function *F = CGM.getIntrinsic(ID, ResultType);
17904 return Builder.CreateCall(F, X);
17907 switch (BuiltinID) { // FIXME: constrained version?
17908 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
17909 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
17910 default: llvm_unreachable("Unknown BuiltinID");
17912 Function *F = CGM.getIntrinsic(ID);
17913 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
17914 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
17915 return Builder.CreateCall(F, {X, M4Value, M5Value});
17917 case SystemZ::BI__builtin_s390_vfmaxsb:
17918 case SystemZ::BI__builtin_s390_vfmaxdb: {
17919 llvm::Type *ResultType = ConvertType(E->getType());
17920 Value *X = EmitScalarExpr(E->getArg(0));
17921 Value *Y = EmitScalarExpr(E->getArg(1));
17922 // Constant-fold the M4 mask argument.
17923 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
17924 // Check whether this instance can be represented via an LLVM standard
17925 // intrinsic. We only support some values of M4.
17926 Intrinsic::ID ID = Intrinsic::not_intrinsic;
17927 Intrinsic::ID CI;
17928 switch (M4.getZExtValue()) {
17929 default: break;
17930 case 4: ID = Intrinsic::maxnum;
17931 CI = Intrinsic::experimental_constrained_maxnum; break;
17933 if (ID != Intrinsic::not_intrinsic) {
17934 if (Builder.getIsFPConstrained()) {
17935 Function *F = CGM.getIntrinsic(CI, ResultType);
17936 return Builder.CreateConstrainedFPCall(F, {X, Y});
17937 } else {
17938 Function *F = CGM.getIntrinsic(ID, ResultType);
17939 return Builder.CreateCall(F, {X, Y});
17942 switch (BuiltinID) {
17943 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
17944 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
17945 default: llvm_unreachable("Unknown BuiltinID");
17947 Function *F = CGM.getIntrinsic(ID);
17948 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
17949 return Builder.CreateCall(F, {X, Y, M4Value});
17951 case SystemZ::BI__builtin_s390_vfminsb:
17952 case SystemZ::BI__builtin_s390_vfmindb: {
17953 llvm::Type *ResultType = ConvertType(E->getType());
17954 Value *X = EmitScalarExpr(E->getArg(0));
17955 Value *Y = EmitScalarExpr(E->getArg(1));
17956 // Constant-fold the M4 mask argument.
17957 llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
17958 // Check whether this instance can be represented via an LLVM standard
17959 // intrinsic. We only support some values of M4.
17960 Intrinsic::ID ID = Intrinsic::not_intrinsic;
17961 Intrinsic::ID CI;
17962 switch (M4.getZExtValue()) {
17963 default: break;
17964 case 4: ID = Intrinsic::minnum;
17965 CI = Intrinsic::experimental_constrained_minnum; break;
17967 if (ID != Intrinsic::not_intrinsic) {
17968 if (Builder.getIsFPConstrained()) {
17969 Function *F = CGM.getIntrinsic(CI, ResultType);
17970 return Builder.CreateConstrainedFPCall(F, {X, Y});
17971 } else {
17972 Function *F = CGM.getIntrinsic(ID, ResultType);
17973 return Builder.CreateCall(F, {X, Y});
17976 switch (BuiltinID) {
17977 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
17978 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
17979 default: llvm_unreachable("Unknown BuiltinID");
17981 Function *F = CGM.getIntrinsic(ID);
17982 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
17983 return Builder.CreateCall(F, {X, Y, M4Value});
17986 case SystemZ::BI__builtin_s390_vlbrh:
17987 case SystemZ::BI__builtin_s390_vlbrf:
17988 case SystemZ::BI__builtin_s390_vlbrg: {
17989 llvm::Type *ResultType = ConvertType(E->getType());
17990 Value *X = EmitScalarExpr(E->getArg(0));
17991 Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
17992 return Builder.CreateCall(F, X);
17995 // Vector intrinsics that output the post-instruction CC value.
17997 #define INTRINSIC_WITH_CC(NAME) \
17998 case SystemZ::BI__builtin_##NAME: \
17999 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
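// Illustrative expansion: INTRINSIC_WITH_CC(s390_vpkshs) produces
//   case SystemZ::BI__builtin_s390_vpkshs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);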
18001 INTRINSIC_WITH_CC(s390_vpkshs);
18002 INTRINSIC_WITH_CC(s390_vpksfs);
18003 INTRINSIC_WITH_CC(s390_vpksgs);
18005 INTRINSIC_WITH_CC(s390_vpklshs);
18006 INTRINSIC_WITH_CC(s390_vpklsfs);
18007 INTRINSIC_WITH_CC(s390_vpklsgs);
18009 INTRINSIC_WITH_CC(s390_vceqbs);
18010 INTRINSIC_WITH_CC(s390_vceqhs);
18011 INTRINSIC_WITH_CC(s390_vceqfs);
18012 INTRINSIC_WITH_CC(s390_vceqgs);
18014 INTRINSIC_WITH_CC(s390_vchbs);
18015 INTRINSIC_WITH_CC(s390_vchhs);
18016 INTRINSIC_WITH_CC(s390_vchfs);
18017 INTRINSIC_WITH_CC(s390_vchgs);
18019 INTRINSIC_WITH_CC(s390_vchlbs);
18020 INTRINSIC_WITH_CC(s390_vchlhs);
18021 INTRINSIC_WITH_CC(s390_vchlfs);
18022 INTRINSIC_WITH_CC(s390_vchlgs);
18024 INTRINSIC_WITH_CC(s390_vfaebs);
18025 INTRINSIC_WITH_CC(s390_vfaehs);
18026 INTRINSIC_WITH_CC(s390_vfaefs);
18028 INTRINSIC_WITH_CC(s390_vfaezbs);
18029 INTRINSIC_WITH_CC(s390_vfaezhs);
18030 INTRINSIC_WITH_CC(s390_vfaezfs);
18032 INTRINSIC_WITH_CC(s390_vfeebs);
18033 INTRINSIC_WITH_CC(s390_vfeehs);
18034 INTRINSIC_WITH_CC(s390_vfeefs);
18036 INTRINSIC_WITH_CC(s390_vfeezbs);
18037 INTRINSIC_WITH_CC(s390_vfeezhs);
18038 INTRINSIC_WITH_CC(s390_vfeezfs);
18040 INTRINSIC_WITH_CC(s390_vfenebs);
18041 INTRINSIC_WITH_CC(s390_vfenehs);
18042 INTRINSIC_WITH_CC(s390_vfenefs);
18044 INTRINSIC_WITH_CC(s390_vfenezbs);
18045 INTRINSIC_WITH_CC(s390_vfenezhs);
18046 INTRINSIC_WITH_CC(s390_vfenezfs);
18048 INTRINSIC_WITH_CC(s390_vistrbs);
18049 INTRINSIC_WITH_CC(s390_vistrhs);
18050 INTRINSIC_WITH_CC(s390_vistrfs);
18052 INTRINSIC_WITH_CC(s390_vstrcbs);
18053 INTRINSIC_WITH_CC(s390_vstrchs);
18054 INTRINSIC_WITH_CC(s390_vstrcfs);
18056 INTRINSIC_WITH_CC(s390_vstrczbs);
18057 INTRINSIC_WITH_CC(s390_vstrczhs);
18058 INTRINSIC_WITH_CC(s390_vstrczfs);
18060 INTRINSIC_WITH_CC(s390_vfcesbs);
18061 INTRINSIC_WITH_CC(s390_vfcedbs);
18062 INTRINSIC_WITH_CC(s390_vfchsbs);
18063 INTRINSIC_WITH_CC(s390_vfchdbs);
18064 INTRINSIC_WITH_CC(s390_vfchesbs);
18065 INTRINSIC_WITH_CC(s390_vfchedbs);
18067 INTRINSIC_WITH_CC(s390_vftcisb);
18068 INTRINSIC_WITH_CC(s390_vftcidb);
18070 INTRINSIC_WITH_CC(s390_vstrsb);
18071 INTRINSIC_WITH_CC(s390_vstrsh);
18072 INTRINSIC_WITH_CC(s390_vstrsf);
18074 INTRINSIC_WITH_CC(s390_vstrszb);
18075 INTRINSIC_WITH_CC(s390_vstrszh);
18076 INTRINSIC_WITH_CC(s390_vstrszf);
18078 #undef INTRINSIC_WITH_CC
18080 default:
18081 return nullptr;
18085 namespace {
18086 // Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
18087 struct NVPTXMmaLdstInfo {
18088 unsigned NumResults; // Number of elements to load/store
18089 // Intrinsic IDs for the row/col variants; 0 if the particular layout is unsupported.
18090 unsigned IID_col;
18091 unsigned IID_row;
18094 #define MMA_INTR(geom_op_type, layout) \
18095 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
18096 #define MMA_LDST(n, geom_op_type) \
18097 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
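// Illustrative expansion: MMA_LDST(8, m16n16k16_load_a_f16) yields
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}
// i.e. the element count plus the col/row intrinsic IDs.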
18099 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
18100 switch (BuiltinID) {
18101 // FP MMA loads
18102 case NVPTX::BI__hmma_m16n16k16_ld_a:
18103 return MMA_LDST(8, m16n16k16_load_a_f16);
18104 case NVPTX::BI__hmma_m16n16k16_ld_b:
18105 return MMA_LDST(8, m16n16k16_load_b_f16);
18106 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
18107 return MMA_LDST(4, m16n16k16_load_c_f16);
18108 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
18109 return MMA_LDST(8, m16n16k16_load_c_f32);
18110 case NVPTX::BI__hmma_m32n8k16_ld_a:
18111 return MMA_LDST(8, m32n8k16_load_a_f16);
18112 case NVPTX::BI__hmma_m32n8k16_ld_b:
18113 return MMA_LDST(8, m32n8k16_load_b_f16);
18114 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
18115 return MMA_LDST(4, m32n8k16_load_c_f16);
18116 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
18117 return MMA_LDST(8, m32n8k16_load_c_f32);
18118 case NVPTX::BI__hmma_m8n32k16_ld_a:
18119 return MMA_LDST(8, m8n32k16_load_a_f16);
18120 case NVPTX::BI__hmma_m8n32k16_ld_b:
18121 return MMA_LDST(8, m8n32k16_load_b_f16);
18122 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
18123 return MMA_LDST(4, m8n32k16_load_c_f16);
18124 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
18125 return MMA_LDST(8, m8n32k16_load_c_f32);
18127 // Integer MMA loads
18128 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
18129 return MMA_LDST(2, m16n16k16_load_a_s8);
18130 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
18131 return MMA_LDST(2, m16n16k16_load_a_u8);
18132 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
18133 return MMA_LDST(2, m16n16k16_load_b_s8);
18134 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
18135 return MMA_LDST(2, m16n16k16_load_b_u8);
18136 case NVPTX::BI__imma_m16n16k16_ld_c:
18137 return MMA_LDST(8, m16n16k16_load_c_s32);
18138 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
18139 return MMA_LDST(4, m32n8k16_load_a_s8);
18140 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
18141 return MMA_LDST(4, m32n8k16_load_a_u8);
18142 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
18143 return MMA_LDST(1, m32n8k16_load_b_s8);
18144 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
18145 return MMA_LDST(1, m32n8k16_load_b_u8);
18146 case NVPTX::BI__imma_m32n8k16_ld_c:
18147 return MMA_LDST(8, m32n8k16_load_c_s32);
18148 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
18149 return MMA_LDST(1, m8n32k16_load_a_s8);
18150 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
18151 return MMA_LDST(1, m8n32k16_load_a_u8);
18152 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
18153 return MMA_LDST(4, m8n32k16_load_b_s8);
18154 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
18155 return MMA_LDST(4, m8n32k16_load_b_u8);
18156 case NVPTX::BI__imma_m8n32k16_ld_c:
18157 return MMA_LDST(8, m8n32k16_load_c_s32);
18159 // Sub-integer MMA loads.
18160 // Only row/col layout is supported by A/B fragments.
18161 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
18162 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
18163 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
18164 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
18165 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
18166 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
18167 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
18168 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
18169 case NVPTX::BI__imma_m8n8k32_ld_c:
18170 return MMA_LDST(2, m8n8k32_load_c_s32);
18171 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
18172 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
18173 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
18174 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
18175 case NVPTX::BI__bmma_m8n8k128_ld_c:
18176 return MMA_LDST(2, m8n8k128_load_c_s32);
18178 // Double MMA loads
18179 case NVPTX::BI__dmma_m8n8k4_ld_a:
18180 return MMA_LDST(1, m8n8k4_load_a_f64);
18181 case NVPTX::BI__dmma_m8n8k4_ld_b:
18182 return MMA_LDST(1, m8n8k4_load_b_f64);
18183 case NVPTX::BI__dmma_m8n8k4_ld_c:
18184 return MMA_LDST(2, m8n8k4_load_c_f64);
18186 // Alternate float MMA loads
18187 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
18188 return MMA_LDST(4, m16n16k16_load_a_bf16);
18189 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
18190 return MMA_LDST(4, m16n16k16_load_b_bf16);
18191 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
18192 return MMA_LDST(2, m8n32k16_load_a_bf16);
18193 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
18194 return MMA_LDST(8, m8n32k16_load_b_bf16);
18195 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
18196 return MMA_LDST(8, m32n8k16_load_a_bf16);
18197 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
18198 return MMA_LDST(2, m32n8k16_load_b_bf16);
18199 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
18200 return MMA_LDST(4, m16n16k8_load_a_tf32);
18201 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
18202 return MMA_LDST(4, m16n16k8_load_b_tf32);
18203 case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
18204 return MMA_LDST(8, m16n16k8_load_c_f32);
18206 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
18207 // PTX and LLVM IR, where stores always use fragment D, NVCC builtins always
18208 // use fragment C for both loads and stores.
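  // For example, __hmma_m16n16k16_st_c_f32 below maps to the
  // m16n16k16_store_d_f32 intrinsics even though the builtin names fragment C.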
18209 // FP MMA stores.
18210 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
18211 return MMA_LDST(4, m16n16k16_store_d_f16);
18212 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
18213 return MMA_LDST(8, m16n16k16_store_d_f32);
18214 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
18215 return MMA_LDST(4, m32n8k16_store_d_f16);
18216 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
18217 return MMA_LDST(8, m32n8k16_store_d_f32);
18218 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
18219 return MMA_LDST(4, m8n32k16_store_d_f16);
18220 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
18221 return MMA_LDST(8, m8n32k16_store_d_f32);
18223 // Integer and sub-integer MMA stores.
18224 // Another naming quirk: unlike other MMA builtins, which use PTX types in the
18225 // name, integer loads/stores use LLVM's i32.
18226 case NVPTX::BI__imma_m16n16k16_st_c_i32:
18227 return MMA_LDST(8, m16n16k16_store_d_s32);
18228 case NVPTX::BI__imma_m32n8k16_st_c_i32:
18229 return MMA_LDST(8, m32n8k16_store_d_s32);
18230 case NVPTX::BI__imma_m8n32k16_st_c_i32:
18231 return MMA_LDST(8, m8n32k16_store_d_s32);
18232 case NVPTX::BI__imma_m8n8k32_st_c_i32:
18233 return MMA_LDST(2, m8n8k32_store_d_s32);
18234 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
18235 return MMA_LDST(2, m8n8k128_store_d_s32);
18237 // Double MMA store
18238 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
18239 return MMA_LDST(2, m8n8k4_store_d_f64);
18241 // Alternate float MMA store
18242 case NVPTX::BI__mma_m16n16k8_st_c_f32:
18243 return MMA_LDST(8, m16n16k8_store_d_f32);
18245 default:
18246 llvm_unreachable("Unknown MMA builtin");
18249 #undef MMA_LDST
18250 #undef MMA_INTR
18253 struct NVPTXMmaInfo {
18254 unsigned NumEltsA;
18255 unsigned NumEltsB;
18256 unsigned NumEltsC;
18257 unsigned NumEltsD;
18259 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
18260 // over 'col' for layout. The index of non-satf variants is expected to match
18261 // the undocumented layout constants used by CUDA's mma.hpp.
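  // Illustrative lookup (see getMMAIntrinsic below): Layout = 1 with
  // Satf = true gives Index = 1 + 4 * 1 = 5, i.e. the row_col *_satfinite
  // variant for builtins declared with MMA_SATF_VARIANTS.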
18262 std::array<unsigned, 8> Variants;
18264 unsigned getMMAIntrinsic(int Layout, bool Satf) {
18265 unsigned Index = Layout + 4 * Satf;
18266 if (Index >= Variants.size())
18267 return 0;
18268 return Variants[Index];
18272 // Returns the intrinsic that matches Layout and Satf for valid
18273 // combinations, or 0 otherwise.
18274 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
18275 // clang-format off
18276 #define MMA_VARIANTS(geom, type) \
18277 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
18278 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
18279 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
18280 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
18281 #define MMA_SATF_VARIANTS(geom, type) \
18282 MMA_VARIANTS(geom, type), \
18283 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
18284 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
18285 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
18286 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
18287 // Sub-integer MMA only supports row.col layout.
18288 #define MMA_VARIANTS_I4(geom, type) \
18289 0, \
18290 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
18291 0, \
18292 0, \
18293 0, \
18294 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
18295 0, \
18297 // b1 MMA does not support .satfinite.
18298 #define MMA_VARIANTS_B1_XOR(geom, type) \
18299 0, \
18300 Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
18301 0, \
18302 0, \
18303 0, \
18304 0, \
18305 0, \
18307 #define MMA_VARIANTS_B1_AND(geom, type) \
18308 0, \
18309 Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
18310 0, \
18311 0, \
18312 0, \
18313 0, \
18314 0, \
18316 // clang-format on
18317 switch (BuiltinID) {
18318 // FP MMA
18319 // Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
18320 // the NumEltsN fields of the return value are ordered as A, B, C, D.
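  // For example, __hmma_m16n16k16_mma_f32f16 below uses the 'f32_f16'
  // (D = f32, C = f16) variants and returns {8, 8, 4, 8} for
  // NumEltsA/B/C/D respectively.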
18321 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
18322 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
18323 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
18324 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
18325 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
18326 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
18327 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
18328 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
18329 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
18330 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
18331 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
18332 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
18333 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
18334 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
18335 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
18336 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
18337 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
18338 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
18339 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
18340 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
18341 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
18342 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
18343 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
18344 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
18346 // Integer MMA
18347 case NVPTX::BI__imma_m16n16k16_mma_s8:
18348 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
18349 case NVPTX::BI__imma_m16n16k16_mma_u8:
18350 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
18351 case NVPTX::BI__imma_m32n8k16_mma_s8:
18352 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
18353 case NVPTX::BI__imma_m32n8k16_mma_u8:
18354 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
18355 case NVPTX::BI__imma_m8n32k16_mma_s8:
18356 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
18357 case NVPTX::BI__imma_m8n32k16_mma_u8:
18358 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
18360 // Sub-integer MMA
18361 case NVPTX::BI__imma_m8n8k32_mma_s4:
18362 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
18363 case NVPTX::BI__imma_m8n8k32_mma_u4:
18364 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
18365 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
18366 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
18367 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
18368 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
18370 // Double MMA
18371 case NVPTX::BI__dmma_m8n8k4_mma_f64:
18372 return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
18374 // Alternate FP MMA
18375 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
18376 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
18377 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
18378 return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
18379 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
18380 return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
18381 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
18382 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
18383 default:
18384 llvm_unreachable("Unexpected builtin ID.");
18386 #undef MMA_VARIANTS
18387 #undef MMA_SATF_VARIANTS
18388 #undef MMA_VARIANTS_I4
18389 #undef MMA_VARIANTS_B1_AND
18390 #undef MMA_VARIANTS_B1_XOR
18393 static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
18394 const CallExpr *E) {
18395 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
18396 QualType ArgType = E->getArg(0)->getType();
18397 clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
18398 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
18399 return CGF.Builder.CreateCall(
18400 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
18401 {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
18404 static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
18405 const CallExpr *E) {
18406 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
18407 llvm::Type *ElemTy =
18408 CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
18409 return CGF.Builder.CreateCall(
18410 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
18411 {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
18414 static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
18415 CodeGenFunction &CGF, const CallExpr *E,
18416 int SrcSize) {
18417 return E->getNumArgs() == 3
18418 ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
18419 {CGF.EmitScalarExpr(E->getArg(0)),
18420 CGF.EmitScalarExpr(E->getArg(1)),
18421 CGF.EmitScalarExpr(E->getArg(2))})
18422 : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
18423 {CGF.EmitScalarExpr(E->getArg(0)),
18424 CGF.EmitScalarExpr(E->getArg(1))});
18427 static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
18428 const CallExpr *E, CodeGenFunction &CGF) {
18429 auto &C = CGF.CGM.getContext();
18430 if (!(C.getLangOpts().NativeHalfType ||
18431 !C.getTargetInfo().useFP16ConversionIntrinsics())) {
18432 CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() +
18433 " requires native half type support.");
18434 return nullptr;
18437 if (IntrinsicID == Intrinsic::nvvm_ldg_global_f ||
18438 IntrinsicID == Intrinsic::nvvm_ldu_global_f)
18439 return MakeLdgLdu(IntrinsicID, CGF, E);
18441 SmallVector<Value *, 16> Args;
18442 auto *F = CGF.CGM.getIntrinsic(IntrinsicID);
18443 auto *FTy = F->getFunctionType();
18444 unsigned ICEArguments = 0;
18445 ASTContext::GetBuiltinTypeError Error;
18446 C.GetBuiltinType(BuiltinID, Error, &ICEArguments);
18447 assert(Error == ASTContext::GE_None && "Should not codegen an error");
18448 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
18449 assert((ICEArguments & (1 << i)) == 0);
18450 auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
18451 auto *PTy = FTy->getParamType(i);
18452 if (PTy != ArgValue->getType())
18453 ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
18454 Args.push_back(ArgValue);
18457 return CGF.Builder.CreateCall(F, Args);
18459 } // namespace
18461 Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
18462 const CallExpr *E) {
18463 switch (BuiltinID) {
18464 case NVPTX::BI__nvvm_atom_add_gen_i:
18465 case NVPTX::BI__nvvm_atom_add_gen_l:
18466 case NVPTX::BI__nvvm_atom_add_gen_ll:
18467 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
18469 case NVPTX::BI__nvvm_atom_sub_gen_i:
18470 case NVPTX::BI__nvvm_atom_sub_gen_l:
18471 case NVPTX::BI__nvvm_atom_sub_gen_ll:
18472 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
18474 case NVPTX::BI__nvvm_atom_and_gen_i:
18475 case NVPTX::BI__nvvm_atom_and_gen_l:
18476 case NVPTX::BI__nvvm_atom_and_gen_ll:
18477 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
18479 case NVPTX::BI__nvvm_atom_or_gen_i:
18480 case NVPTX::BI__nvvm_atom_or_gen_l:
18481 case NVPTX::BI__nvvm_atom_or_gen_ll:
18482 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
18484 case NVPTX::BI__nvvm_atom_xor_gen_i:
18485 case NVPTX::BI__nvvm_atom_xor_gen_l:
18486 case NVPTX::BI__nvvm_atom_xor_gen_ll:
18487 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
18489 case NVPTX::BI__nvvm_atom_xchg_gen_i:
18490 case NVPTX::BI__nvvm_atom_xchg_gen_l:
18491 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
18492 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
18494 case NVPTX::BI__nvvm_atom_max_gen_i:
18495 case NVPTX::BI__nvvm_atom_max_gen_l:
18496 case NVPTX::BI__nvvm_atom_max_gen_ll:
18497 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
18499 case NVPTX::BI__nvvm_atom_max_gen_ui:
18500 case NVPTX::BI__nvvm_atom_max_gen_ul:
18501 case NVPTX::BI__nvvm_atom_max_gen_ull:
18502 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
18504 case NVPTX::BI__nvvm_atom_min_gen_i:
18505 case NVPTX::BI__nvvm_atom_min_gen_l:
18506 case NVPTX::BI__nvvm_atom_min_gen_ll:
18507 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
18509 case NVPTX::BI__nvvm_atom_min_gen_ui:
18510 case NVPTX::BI__nvvm_atom_min_gen_ul:
18511 case NVPTX::BI__nvvm_atom_min_gen_ull:
18512 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
18514 case NVPTX::BI__nvvm_atom_cas_gen_i:
18515 case NVPTX::BI__nvvm_atom_cas_gen_l:
18516 case NVPTX::BI__nvvm_atom_cas_gen_ll:
18517 // __nvvm_atom_cas_gen_* should return the old value rather than the
18518 // success flag.
18519 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
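  // Illustratively, old = __nvvm_atom_cas_gen_i(&x, cmp, val) becomes a
  // cmpxchg from which only the loaded (old) value is extracted, not the
  // i1 success flag.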
18521 case NVPTX::BI__nvvm_atom_add_gen_f:
18522 case NVPTX::BI__nvvm_atom_add_gen_d: {
18523 Value *Ptr = EmitScalarExpr(E->getArg(0));
18524 Value *Val = EmitScalarExpr(E->getArg(1));
18525 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
18526 AtomicOrdering::SequentiallyConsistent);
18529 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
18530 Value *Ptr = EmitScalarExpr(E->getArg(0));
18531 Value *Val = EmitScalarExpr(E->getArg(1));
18532 Function *FnALI32 =
18533 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
18534 return Builder.CreateCall(FnALI32, {Ptr, Val});
18537 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
18538 Value *Ptr = EmitScalarExpr(E->getArg(0));
18539 Value *Val = EmitScalarExpr(E->getArg(1));
18540 Function *FnALD32 =
18541 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
18542 return Builder.CreateCall(FnALD32, {Ptr, Val});
18545 case NVPTX::BI__nvvm_ldg_c:
18546 case NVPTX::BI__nvvm_ldg_sc:
18547 case NVPTX::BI__nvvm_ldg_c2:
18548 case NVPTX::BI__nvvm_ldg_sc2:
18549 case NVPTX::BI__nvvm_ldg_c4:
18550 case NVPTX::BI__nvvm_ldg_sc4:
18551 case NVPTX::BI__nvvm_ldg_s:
18552 case NVPTX::BI__nvvm_ldg_s2:
18553 case NVPTX::BI__nvvm_ldg_s4:
18554 case NVPTX::BI__nvvm_ldg_i:
18555 case NVPTX::BI__nvvm_ldg_i2:
18556 case NVPTX::BI__nvvm_ldg_i4:
18557 case NVPTX::BI__nvvm_ldg_l:
18558 case NVPTX::BI__nvvm_ldg_l2:
18559 case NVPTX::BI__nvvm_ldg_ll:
18560 case NVPTX::BI__nvvm_ldg_ll2:
18561 case NVPTX::BI__nvvm_ldg_uc:
18562 case NVPTX::BI__nvvm_ldg_uc2:
18563 case NVPTX::BI__nvvm_ldg_uc4:
18564 case NVPTX::BI__nvvm_ldg_us:
18565 case NVPTX::BI__nvvm_ldg_us2:
18566 case NVPTX::BI__nvvm_ldg_us4:
18567 case NVPTX::BI__nvvm_ldg_ui:
18568 case NVPTX::BI__nvvm_ldg_ui2:
18569 case NVPTX::BI__nvvm_ldg_ui4:
18570 case NVPTX::BI__nvvm_ldg_ul:
18571 case NVPTX::BI__nvvm_ldg_ul2:
18572 case NVPTX::BI__nvvm_ldg_ull:
18573 case NVPTX::BI__nvvm_ldg_ull2:
18574 // PTX Interoperability section 2.2: "For a vector with an even number of
18575 // elements, its alignment is set to number of elements times the alignment
18576 // of its member: n*alignof(t)."
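  // Illustrative consequence: for an int2 argument the natural alignment is
  // 2 * alignof(int) = 8 bytes, and MakeLdgLdu passes that value as the
  // explicit alignment operand of the ldg/ldu intrinsic call.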
18577 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_i, *this, E);
18578 case NVPTX::BI__nvvm_ldg_f:
18579 case NVPTX::BI__nvvm_ldg_f2:
18580 case NVPTX::BI__nvvm_ldg_f4:
18581 case NVPTX::BI__nvvm_ldg_d:
18582 case NVPTX::BI__nvvm_ldg_d2:
18583 return MakeLdgLdu(Intrinsic::nvvm_ldg_global_f, *this, E);
18585 case NVPTX::BI__nvvm_ldu_c:
18586 case NVPTX::BI__nvvm_ldu_sc:
18587 case NVPTX::BI__nvvm_ldu_c2:
18588 case NVPTX::BI__nvvm_ldu_sc2:
18589 case NVPTX::BI__nvvm_ldu_c4:
18590 case NVPTX::BI__nvvm_ldu_sc4:
18591 case NVPTX::BI__nvvm_ldu_s:
18592 case NVPTX::BI__nvvm_ldu_s2:
18593 case NVPTX::BI__nvvm_ldu_s4:
18594 case NVPTX::BI__nvvm_ldu_i:
18595 case NVPTX::BI__nvvm_ldu_i2:
18596 case NVPTX::BI__nvvm_ldu_i4:
18597 case NVPTX::BI__nvvm_ldu_l:
18598 case NVPTX::BI__nvvm_ldu_l2:
18599 case NVPTX::BI__nvvm_ldu_ll:
18600 case NVPTX::BI__nvvm_ldu_ll2:
18601 case NVPTX::BI__nvvm_ldu_uc:
18602 case NVPTX::BI__nvvm_ldu_uc2:
18603 case NVPTX::BI__nvvm_ldu_uc4:
18604 case NVPTX::BI__nvvm_ldu_us:
18605 case NVPTX::BI__nvvm_ldu_us2:
18606 case NVPTX::BI__nvvm_ldu_us4:
18607 case NVPTX::BI__nvvm_ldu_ui:
18608 case NVPTX::BI__nvvm_ldu_ui2:
18609 case NVPTX::BI__nvvm_ldu_ui4:
18610 case NVPTX::BI__nvvm_ldu_ul:
18611 case NVPTX::BI__nvvm_ldu_ul2:
18612 case NVPTX::BI__nvvm_ldu_ull:
18613 case NVPTX::BI__nvvm_ldu_ull2:
18614 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
18615 case NVPTX::BI__nvvm_ldu_f:
18616 case NVPTX::BI__nvvm_ldu_f2:
18617 case NVPTX::BI__nvvm_ldu_f4:
18618 case NVPTX::BI__nvvm_ldu_d:
18619 case NVPTX::BI__nvvm_ldu_d2:
18620 return MakeLdgLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
18622 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
18623 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
18624 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
18625 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
18626 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
18627 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
18628 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
18629 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
18630 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
18631 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
18632 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
18633 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
18634 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
18635 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
18636 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
18637 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
18638 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
18639 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
18640 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
18641 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
18642 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
18643 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
18644 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
18645 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
18646 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
18647 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
18648 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
18649 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
18650 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
18651 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
18652 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
18653 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
18654 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
18655 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
18656 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
18657 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
18658 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
18659 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
18660 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
18661 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
18662 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
18663 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
18664 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
18665 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
18666 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
18667 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
18668 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
18669 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
18670 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
18671 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
18672 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
18673 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
18674 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
18675 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
18676 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
18677 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
18678 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
18679 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
18680 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
18681 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
18682 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
18683 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
18684 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
18685 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
18686 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
18687 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
18688 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
18689 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
18690 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
18691 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
18692 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
18693 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
18694 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
18695 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
18696 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
18697 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
18698 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
18699 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
18700 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
18701 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
18702 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
18703 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
18704 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
18705 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
18706 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
18707 Value *Ptr = EmitScalarExpr(E->getArg(0));
18708 llvm::Type *ElemTy =
18709 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
18710 return Builder.CreateCall(
18711 CGM.getIntrinsic(
18712 Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
18713 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
18715 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
18716 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
18717 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
18718 Value *Ptr = EmitScalarExpr(E->getArg(0));
18719 llvm::Type *ElemTy =
18720 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
18721 return Builder.CreateCall(
18722 CGM.getIntrinsic(
18723 Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
18724 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
18726 case NVPTX::BI__nvvm_match_all_sync_i32p:
18727 case NVPTX::BI__nvvm_match_all_sync_i64p: {
18728 Value *Mask = EmitScalarExpr(E->getArg(0));
18729 Value *Val = EmitScalarExpr(E->getArg(1));
18730 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
18731 Value *ResultPair = Builder.CreateCall(
18732 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
18733 ? Intrinsic::nvvm_match_all_sync_i32p
18734 : Intrinsic::nvvm_match_all_sync_i64p),
18735 {Mask, Val});
18736 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
18737 PredOutPtr.getElementType());
18738 Builder.CreateStore(Pred, PredOutPtr);
18739 return Builder.CreateExtractValue(ResultPair, 0);
18742 // FP MMA loads
18743 case NVPTX::BI__hmma_m16n16k16_ld_a:
18744 case NVPTX::BI__hmma_m16n16k16_ld_b:
18745 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
18746 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
18747 case NVPTX::BI__hmma_m32n8k16_ld_a:
18748 case NVPTX::BI__hmma_m32n8k16_ld_b:
18749 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
18750 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
18751 case NVPTX::BI__hmma_m8n32k16_ld_a:
18752 case NVPTX::BI__hmma_m8n32k16_ld_b:
18753 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
18754 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
18755 // Integer MMA loads.
18756 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
18757 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
18758 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
18759 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
18760 case NVPTX::BI__imma_m16n16k16_ld_c:
18761 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
18762 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
18763 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
18764 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
18765 case NVPTX::BI__imma_m32n8k16_ld_c:
18766 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
18767 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
18768 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
18769 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
18770 case NVPTX::BI__imma_m8n32k16_ld_c:
18771 // Sub-integer MMA loads.
18772 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
18773 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
18774 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
18775 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
18776 case NVPTX::BI__imma_m8n8k32_ld_c:
18777 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
18778 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
18779 case NVPTX::BI__bmma_m8n8k128_ld_c:
18780 // Double MMA loads.
18781 case NVPTX::BI__dmma_m8n8k4_ld_a:
18782 case NVPTX::BI__dmma_m8n8k4_ld_b:
18783 case NVPTX::BI__dmma_m8n8k4_ld_c:
18784 // Alternate float MMA loads.
18785 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
18786 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
18787 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
18788 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
18789 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
18790 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
18791 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
18792 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
18793 case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
18794 Address Dst = EmitPointerWithAlignment(E->getArg(0));
18795 Value *Src = EmitScalarExpr(E->getArg(1));
18796 Value *Ldm = EmitScalarExpr(E->getArg(2));
18797 std::optional<llvm::APSInt> isColMajorArg =
18798 E->getArg(3)->getIntegerConstantExpr(getContext());
18799 if (!isColMajorArg)
18800 return nullptr;
18801 bool isColMajor = isColMajorArg->getSExtValue();
18802 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
18803 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
18804 if (IID == 0)
18805 return nullptr;
18807 Value *Result =
18808 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
18810 // Save returned values.
18811 assert(II.NumResults);
18812 if (II.NumResults == 1) {
18813 Builder.CreateAlignedStore(Result, Dst.getPointer(),
18814 CharUnits::fromQuantity(4));
18815 } else {
18816 for (unsigned i = 0; i < II.NumResults; ++i) {
18817 Builder.CreateAlignedStore(
18818 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
18819 Dst.getElementType()),
18820 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
18821 llvm::ConstantInt::get(IntTy, i)),
18822 CharUnits::fromQuantity(4));
18825 return Result;
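// Illustrative source-level shape (names arbitrary):
//   __hmma_m16n16k16_ld_a(dst, src, ldm, /*is_col_major=*/0);
// The layout flag must be an integer constant expression; it selects the
// row- or column-major intrinsic, and the intrinsic's results are stored
// into dst with 4-byte alignment as above.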
18828 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
18829 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
18830 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
18831 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
18832 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
18833 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
18834 case NVPTX::BI__imma_m16n16k16_st_c_i32:
18835 case NVPTX::BI__imma_m32n8k16_st_c_i32:
18836 case NVPTX::BI__imma_m8n32k16_st_c_i32:
18837 case NVPTX::BI__imma_m8n8k32_st_c_i32:
18838 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
18839 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
18840 case NVPTX::BI__mma_m16n16k8_st_c_f32: {
18841 Value *Dst = EmitScalarExpr(E->getArg(0));
18842 Address Src = EmitPointerWithAlignment(E->getArg(1));
18843 Value *Ldm = EmitScalarExpr(E->getArg(2));
18844 std::optional<llvm::APSInt> isColMajorArg =
18845 E->getArg(3)->getIntegerConstantExpr(getContext());
18846 if (!isColMajorArg)
18847 return nullptr;
18848 bool isColMajor = isColMajorArg->getSExtValue();
18849 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
18850 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
18851 if (IID == 0)
18852 return nullptr;
18853 Function *Intrinsic =
18854 CGM.getIntrinsic(IID, Dst->getType());
18855 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
18856 SmallVector<Value *, 10> Values = {Dst};
18857 for (unsigned i = 0; i < II.NumResults; ++i) {
18858 Value *V = Builder.CreateAlignedLoad(
18859 Src.getElementType(),
18860 Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
18861 llvm::ConstantInt::get(IntTy, i)),
18862 CharUnits::fromQuantity(4));
18863 Values.push_back(Builder.CreateBitCast(V, ParamType));
18865 Values.push_back(Ldm);
18866 Value *Result = Builder.CreateCall(Intrinsic, Values);
18867 return Result;
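// The store builtins mirror the loads: fragment elements are loaded from
// src with 4-byte alignment, bitcast to the intrinsic's parameter type, and
// passed to the intrinsic together with the leading dimension, e.g.
// (illustrative only)
//   __hmma_m16n16k16_st_c_f32(dst, src, ldm, /*is_col_major=*/0);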
18870 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
18871 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
18872 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
18873 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
18874 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
18875 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
18876 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
18877 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
18878 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
18879 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
18880 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
18881 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
18882 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
18883 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
18884 case NVPTX::BI__imma_m16n16k16_mma_s8:
18885 case NVPTX::BI__imma_m16n16k16_mma_u8:
18886 case NVPTX::BI__imma_m32n8k16_mma_s8:
18887 case NVPTX::BI__imma_m32n8k16_mma_u8:
18888 case NVPTX::BI__imma_m8n32k16_mma_s8:
18889 case NVPTX::BI__imma_m8n32k16_mma_u8:
18890 case NVPTX::BI__imma_m8n8k32_mma_s4:
18891 case NVPTX::BI__imma_m8n8k32_mma_u4:
18892 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
18893 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
18894 case NVPTX::BI__dmma_m8n8k4_mma_f64:
18895 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
18896 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
18897 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
18898 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
18899 Address Dst = EmitPointerWithAlignment(E->getArg(0));
18900 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
18901 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
18902 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
18903 std::optional<llvm::APSInt> LayoutArg =
18904 E->getArg(4)->getIntegerConstantExpr(getContext());
18905 if (!LayoutArg)
18906 return nullptr;
18907 int Layout = LayoutArg->getSExtValue();
18908 if (Layout < 0 || Layout > 3)
18909 return nullptr;
18910 llvm::APSInt SatfArg;
18911 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
18912 BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
18913 SatfArg = 0; // .b1 variants do not have a satf argument.
18914 else if (std::optional<llvm::APSInt> OptSatfArg =
18915 E->getArg(5)->getIntegerConstantExpr(getContext()))
18916 SatfArg = *OptSatfArg;
18917 else
18918 return nullptr;
18919 bool Satf = SatfArg.getSExtValue();
18920 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
18921 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
18922 if (IID == 0) // Unsupported combination of Layout/Satf.
18923 return nullptr;
18925 SmallVector<Value *, 24> Values;
18926 Function *Intrinsic = CGM.getIntrinsic(IID);
18927 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
18928 // Load A
18929 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
18930 Value *V = Builder.CreateAlignedLoad(
18931 SrcA.getElementType(),
18932 Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
18933 llvm::ConstantInt::get(IntTy, i)),
18934 CharUnits::fromQuantity(4));
18935 Values.push_back(Builder.CreateBitCast(V, AType));
18937 // Load B
18938 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
18939 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
18940 Value *V = Builder.CreateAlignedLoad(
18941 SrcB.getElementType(),
18942 Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
18943 llvm::ConstantInt::get(IntTy, i)),
18944 CharUnits::fromQuantity(4));
18945 Values.push_back(Builder.CreateBitCast(V, BType));
18947 // Load C
18948 llvm::Type *CType =
18949 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
18950 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
18951 Value *V = Builder.CreateAlignedLoad(
18952 SrcC.getElementType(),
18953 Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
18954 llvm::ConstantInt::get(IntTy, i)),
18955 CharUnits::fromQuantity(4));
18956 Values.push_back(Builder.CreateBitCast(V, CType));
18958 Value *Result = Builder.CreateCall(Intrinsic, Values);
18959 llvm::Type *DType = Dst.getElementType();
18960 for (unsigned i = 0; i < MI.NumEltsD; ++i)
18961 Builder.CreateAlignedStore(
18962 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
18963 Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
18964 llvm::ConstantInt::get(IntTy, i)),
18965 CharUnits::fromQuantity(4));
18966 return Result;
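// Both layout and (where present) satf must be integer constant
// expressions; a layout outside [0, 3] or an unsupported layout/satf
// combination makes this emit nothing (nullptr). Illustrative call:
//   __hmma_m16n16k16_mma_f32f32(d, a, b, c, /*layout=*/0, /*satf=*/0);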
18968 // The following builtins require half type support
18969 case NVPTX::BI__nvvm_ex2_approx_f16:
18970 return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, *this);
18971 case NVPTX::BI__nvvm_ex2_approx_f16x2:
18972 return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, *this);
18973 case NVPTX::BI__nvvm_ff2f16x2_rn:
18974 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
18975 case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
18976 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
18977 case NVPTX::BI__nvvm_ff2f16x2_rz:
18978 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
18979 case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
18980 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
18981 case NVPTX::BI__nvvm_fma_rn_f16:
18982 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
18983 case NVPTX::BI__nvvm_fma_rn_f16x2:
18984 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
18985 case NVPTX::BI__nvvm_fma_rn_ftz_f16:
18986 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
18987 case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
18988 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
18989 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
18990 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
18991 *this);
18992 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
18993 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
18994 *this);
18995 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
18996 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
18997 *this);
18998 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
18999 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
19000 *this);
19001 case NVPTX::BI__nvvm_fma_rn_relu_f16:
19002 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
19003 case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
19004 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
19005 case NVPTX::BI__nvvm_fma_rn_sat_f16:
19006 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
19007 case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
19008 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
19009 case NVPTX::BI__nvvm_fmax_f16:
19010 return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
19011 case NVPTX::BI__nvvm_fmax_f16x2:
19012 return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
19013 case NVPTX::BI__nvvm_fmax_ftz_f16:
19014 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
19015 case NVPTX::BI__nvvm_fmax_ftz_f16x2:
19016 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
19017 case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
19018 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
19019 case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
19020 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
19021 *this);
19022 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
19023 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
19024 E, *this);
19025 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
19026 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
19027 BuiltinID, E, *this);
19028 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
19029 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
19030 *this);
19031 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
19032 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
19033 E, *this);
19034 case NVPTX::BI__nvvm_fmax_nan_f16:
19035 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
19036 case NVPTX::BI__nvvm_fmax_nan_f16x2:
19037 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
19038 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
19039 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
19040 *this);
19041 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
19042 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
19043 E, *this);
19044 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
19045 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
19046 *this);
19047 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
19048 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
19049 *this);
19050 case NVPTX::BI__nvvm_fmin_f16:
19051 return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
19052 case NVPTX::BI__nvvm_fmin_f16x2:
19053 return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
19054 case NVPTX::BI__nvvm_fmin_ftz_f16:
19055 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
19056 case NVPTX::BI__nvvm_fmin_ftz_f16x2:
19057 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
19058 case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
19059 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
19060 case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
19061 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
19062 *this);
19063 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
19064 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
19065 E, *this);
19066 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
19067 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
19068 BuiltinID, E, *this);
19069 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
19070 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
19071 *this);
19072 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
19073 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
19074 E, *this);
19075 case NVPTX::BI__nvvm_fmin_nan_f16:
19076 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
19077 case NVPTX::BI__nvvm_fmin_nan_f16x2:
19078 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
19079 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
19080 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
19081 *this);
19082 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
19083 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
19084 E, *this);
19085 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
19086 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
19087 *this);
19088 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
19089 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
19090 *this);
19091 case NVPTX::BI__nvvm_ldg_h:
19092 return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
19093 case NVPTX::BI__nvvm_ldg_h2:
19094 return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
19095 case NVPTX::BI__nvvm_ldu_h:
19096 return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
19097 case NVPTX::BI__nvvm_ldu_h2: {
19098 return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
19100 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
19101 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
19102 Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E, 4);
19104 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
19105 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
19106 Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E, 8);
19108 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
19109 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
19110 Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
19111 16);
19112 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
19113 return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
19114 Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
19115 16);
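// MakeCpAsync (defined earlier in this file) receives both the plain
// intrinsic and its "_s" counterpart plus the copy size in bytes taken from
// the builtin name (4, 8 or 16); which of the two is emitted is decided
// inside MakeCpAsync based on the call's arguments.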
19116 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
19117 return Builder.CreateCall(
19118 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
19119 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
19120 return Builder.CreateCall(
19121 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
19122 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
19123 return Builder.CreateCall(
19124 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
19125 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
19126 return Builder.CreateCall(
19127 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
19128 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
19129 return Builder.CreateCall(
19130 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
19131 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
19132 return Builder.CreateCall(
19133 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
19134 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
19135 return Builder.CreateCall(
19136 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
19137 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
19138 return Builder.CreateCall(
19139 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
19140 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
19141 return Builder.CreateCall(
19142 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
19143 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
19144 return Builder.CreateCall(
19145 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
19146 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
19147 return Builder.CreateCall(
19148 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
19149 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
19150 return Builder.CreateCall(
19151 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
19152 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
19153 return Builder.CreateCall(
19154 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
19155 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
19156 return Builder.CreateCall(
19157 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
19158 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
19159 return Builder.CreateCall(
19160 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
19161 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
19162 return Builder.CreateCall(
19163 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
19164 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
19165 return Builder.CreateCall(
19166 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
19167 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
19168 return Builder.CreateCall(
19169 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
19170 case NVPTX::BI__nvvm_is_explicit_cluster:
19171 return Builder.CreateCall(
19172 CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
19173 case NVPTX::BI__nvvm_isspacep_shared_cluster:
19174 return Builder.CreateCall(
19175 CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
19176 EmitScalarExpr(E->getArg(0)));
19177 case NVPTX::BI__nvvm_mapa:
19178 return Builder.CreateCall(
19179 CGM.getIntrinsic(Intrinsic::nvvm_mapa),
19180 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
19181 case NVPTX::BI__nvvm_mapa_shared_cluster:
19182 return Builder.CreateCall(
19183 CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
19184 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
19185 case NVPTX::BI__nvvm_getctarank:
19186 return Builder.CreateCall(
19187 CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
19188 EmitScalarExpr(E->getArg(0)));
19189 case NVPTX::BI__nvvm_getctarank_shared_cluster:
19190 return Builder.CreateCall(
19191 CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
19192 EmitScalarExpr(E->getArg(0)));
19193 case NVPTX::BI__nvvm_barrier_cluster_arrive:
19194 return Builder.CreateCall(
19195 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
19196 case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
19197 return Builder.CreateCall(
19198 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
19199 case NVPTX::BI__nvvm_barrier_cluster_wait:
19200 return Builder.CreateCall(
19201 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
19202 case NVPTX::BI__nvvm_fence_sc_cluster:
19203 return Builder.CreateCall(
19204 CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
19205 default:
19206 return nullptr;
19210 namespace {
19211 struct BuiltinAlignArgs {
19212 llvm::Value *Src = nullptr;
19213 llvm::Type *SrcType = nullptr;
19214 llvm::Value *Alignment = nullptr;
19215 llvm::Value *Mask = nullptr;
19216 llvm::IntegerType *IntType = nullptr;
19218 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
19219 QualType AstType = E->getArg(0)->getType();
19220 if (AstType->isArrayType())
19221 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19222 else
19223 Src = CGF.EmitScalarExpr(E->getArg(0));
19224 SrcType = Src->getType();
19225 if (SrcType->isPointerTy()) {
19226 IntType = IntegerType::get(
19227 CGF.getLLVMContext(),
19228 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
19229 } else {
19230 assert(SrcType->isIntegerTy());
19231 IntType = cast<llvm::IntegerType>(SrcType);
19233 Alignment = CGF.EmitScalarExpr(E->getArg(1));
19234 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
19235 auto *One = llvm::ConstantInt::get(IntType, 1);
19236 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
19239 } // namespace
19241 /// Generate (x & (y-1)) == 0.
19242 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
19243 BuiltinAlignArgs Args(E, *this);
19244 llvm::Value *SrcAddress = Args.Src;
19245 if (Args.SrcType->isPointerTy())
19246 SrcAddress =
19247 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
19248 return RValue::get(Builder.CreateICmpEQ(
19249 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
19250 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
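// In C terms this is roughly (a minimal sketch):
//   __builtin_is_aligned(p, 16)  ->  ((uintptr_t)p & (16 - 1)) == 0
// i.e. exactly the "src_addr & mask == 0" pattern built above.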
19253 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
19254 /// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
19255 /// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
19256 /// TODO: actually use ptrmask once most optimization passes know about it.
19257 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
19258 BuiltinAlignArgs Args(E, *this);
19259 llvm::Value *SrcAddr = Args.Src;
19260 if (Args.Src->getType()->isPointerTy())
19261 SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
19262 llvm::Value *SrcForMask = SrcAddr;
19263 if (AlignUp) {
19264 // When aligning up we have to first add the mask to ensure we go over the
19265 // next alignment value and then align down to the next valid multiple.
19266 // By adding the mask, we ensure that align_up on an already aligned
19267 // value will not change the value.
19268 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
19270 // Invert the mask to only clear the lower bits.
19271 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
19272 llvm::Value *Result =
19273 Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
19274 if (Args.Src->getType()->isPointerTy()) {
19275 /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
19276 // Result = Builder.CreateIntrinsic(
19277 // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
19278 // {SrcForMask, InvertedMask}, nullptr, "aligned_result");
19279 Result->setName("aligned_intptr");
19280 llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
19281 // The result must point to the same underlying allocation. This means we
19282 // can use an inbounds GEP to enable better optimization.
19283 if (getLangOpts().isSignedOverflowDefined())
19284 Result =
19285 Builder.CreateGEP(Int8Ty, Args.Src, Difference, "aligned_result");
19286 else
19287 Result = EmitCheckedInBoundsGEP(Int8Ty, Args.Src, Difference,
19288 /*SignedIndices=*/true,
19289 /*isSubtraction=*/!AlignUp,
19290 E->getExprLoc(), "aligned_result");
19291 // Emit an alignment assumption to ensure that the new alignment is
19292 // propagated to loads/stores, etc.
19293 emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
19295 assert(Result->getType() == Args.SrcType);
19296 return RValue::get(Result);
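// Equivalent C sketches (illustrative only):
//   __builtin_align_down(x, a)  ->  x & ~(a - 1)
//   __builtin_align_up(x, a)    ->  (x + (a - 1)) & ~(a - 1)
// For pointer arguments the masked integer is turned back into a pointer
// via a GEP of the computed byte difference, as done above, rather than
// with an inttoptr.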
19299 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
19300 const CallExpr *E) {
19301 switch (BuiltinID) {
19302 case WebAssembly::BI__builtin_wasm_memory_size: {
19303 llvm::Type *ResultType = ConvertType(E->getType());
19304 Value *I = EmitScalarExpr(E->getArg(0));
19305 Function *Callee =
19306 CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
19307 return Builder.CreateCall(Callee, I);
19309 case WebAssembly::BI__builtin_wasm_memory_grow: {
19310 llvm::Type *ResultType = ConvertType(E->getType());
19311 Value *Args[] = {EmitScalarExpr(E->getArg(0)),
19312 EmitScalarExpr(E->getArg(1))};
19313 Function *Callee =
19314 CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
19315 return Builder.CreateCall(Callee, Args);
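// Illustrative calls (the first argument is the memory index; sizes are in
// 64KiB wasm pages):
//   cur  = __builtin_wasm_memory_size(0);
//   prev = __builtin_wasm_memory_grow(0, delta); // previous size, or -1 on
//                                                // failure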
19317 case WebAssembly::BI__builtin_wasm_tls_size: {
19318 llvm::Type *ResultType = ConvertType(E->getType());
19319 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
19320 return Builder.CreateCall(Callee);
19322 case WebAssembly::BI__builtin_wasm_tls_align: {
19323 llvm::Type *ResultType = ConvertType(E->getType());
19324 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
19325 return Builder.CreateCall(Callee);
19327 case WebAssembly::BI__builtin_wasm_tls_base: {
19328 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
19329 return Builder.CreateCall(Callee);
19331 case WebAssembly::BI__builtin_wasm_throw: {
19332 Value *Tag = EmitScalarExpr(E->getArg(0));
19333 Value *Obj = EmitScalarExpr(E->getArg(1));
19334 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
19335 return Builder.CreateCall(Callee, {Tag, Obj});
19337 case WebAssembly::BI__builtin_wasm_rethrow: {
19338 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
19339 return Builder.CreateCall(Callee);
19341 case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
19342 Value *Addr = EmitScalarExpr(E->getArg(0));
19343 Value *Expected = EmitScalarExpr(E->getArg(1));
19344 Value *Timeout = EmitScalarExpr(E->getArg(2));
19345 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
19346 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
19348 case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
19349 Value *Addr = EmitScalarExpr(E->getArg(0));
19350 Value *Expected = EmitScalarExpr(E->getArg(1));
19351 Value *Timeout = EmitScalarExpr(E->getArg(2));
19352 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
19353 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
19355 case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
19356 Value *Addr = EmitScalarExpr(E->getArg(0));
19357 Value *Count = EmitScalarExpr(E->getArg(1));
19358 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
19359 return Builder.CreateCall(Callee, {Addr, Count});
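// These correspond to the threads proposal's memory.atomic.wait32/64 and
// memory.atomic.notify operators: the wait forms take the expected value
// and a timeout, and notify takes the maximum number of waiters to wake.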
19361 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
19362 case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
19363 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
19364 case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
19365 Value *Src = EmitScalarExpr(E->getArg(0));
19366 llvm::Type *ResT = ConvertType(E->getType());
19367 Function *Callee =
19368 CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
19369 return Builder.CreateCall(Callee, {Src});
19371 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
19372 case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
19373 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
19374 case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
19375 Value *Src = EmitScalarExpr(E->getArg(0));
19376 llvm::Type *ResT = ConvertType(E->getType());
19377 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
19378 {ResT, Src->getType()});
19379 return Builder.CreateCall(Callee, {Src});
19381 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
19382 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
19383 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
19384 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
19385 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
19386 Value *Src = EmitScalarExpr(E->getArg(0));
19387 llvm::Type *ResT = ConvertType(E->getType());
19388 Function *Callee =
19389 CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
19390 return Builder.CreateCall(Callee, {Src});
19392 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
19393 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
19394 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
19395 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
19396 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
19397 Value *Src = EmitScalarExpr(E->getArg(0));
19398 llvm::Type *ResT = ConvertType(E->getType());
19399 Function *Callee =
19400 CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
19401 return Builder.CreateCall(Callee, {Src});
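// The saturating truncations are emitted with the generic llvm.fptosi.sat /
// llvm.fptoui.sat intrinsics (overloaded on result and source type), which
// clamp out-of-range inputs and map NaN to zero, e.g. (sketch only)
//   int32_t r = __builtin_wasm_trunc_saturate_s_i32_f32(f);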
19403 case WebAssembly::BI__builtin_wasm_min_f32:
19404 case WebAssembly::BI__builtin_wasm_min_f64:
19405 case WebAssembly::BI__builtin_wasm_min_f32x4:
19406 case WebAssembly::BI__builtin_wasm_min_f64x2: {
19407 Value *LHS = EmitScalarExpr(E->getArg(0));
19408 Value *RHS = EmitScalarExpr(E->getArg(1));
19409 Function *Callee =
19410 CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
19411 return Builder.CreateCall(Callee, {LHS, RHS});
19413 case WebAssembly::BI__builtin_wasm_max_f32:
19414 case WebAssembly::BI__builtin_wasm_max_f64:
19415 case WebAssembly::BI__builtin_wasm_max_f32x4:
19416 case WebAssembly::BI__builtin_wasm_max_f64x2: {
19417 Value *LHS = EmitScalarExpr(E->getArg(0));
19418 Value *RHS = EmitScalarExpr(E->getArg(1));
19419 Function *Callee =
19420 CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
19421 return Builder.CreateCall(Callee, {LHS, RHS});
19423 case WebAssembly::BI__builtin_wasm_pmin_f32x4:
19424 case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
19425 Value *LHS = EmitScalarExpr(E->getArg(0));
19426 Value *RHS = EmitScalarExpr(E->getArg(1));
19427 Function *Callee =
19428 CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
19429 return Builder.CreateCall(Callee, {LHS, RHS});
19431 case WebAssembly::BI__builtin_wasm_pmax_f32x4:
19432 case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
19433 Value *LHS = EmitScalarExpr(E->getArg(0));
19434 Value *RHS = EmitScalarExpr(E->getArg(1));
19435 Function *Callee =
19436 CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
19437 return Builder.CreateCall(Callee, {LHS, RHS});
19439 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
19440 case WebAssembly::BI__builtin_wasm_floor_f32x4:
19441 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
19442 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
19443 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
19444 case WebAssembly::BI__builtin_wasm_floor_f64x2:
19445 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
19446 case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
19447 unsigned IntNo;
19448 switch (BuiltinID) {
19449 case WebAssembly::BI__builtin_wasm_ceil_f32x4:
19450 case WebAssembly::BI__builtin_wasm_ceil_f64x2:
19451 IntNo = Intrinsic::ceil;
19452 break;
19453 case WebAssembly::BI__builtin_wasm_floor_f32x4:
19454 case WebAssembly::BI__builtin_wasm_floor_f64x2:
19455 IntNo = Intrinsic::floor;
19456 break;
19457 case WebAssembly::BI__builtin_wasm_trunc_f32x4:
19458 case WebAssembly::BI__builtin_wasm_trunc_f64x2:
19459 IntNo = Intrinsic::trunc;
19460 break;
19461 case WebAssembly::BI__builtin_wasm_nearest_f32x4:
19462 case WebAssembly::BI__builtin_wasm_nearest_f64x2:
19463 IntNo = Intrinsic::nearbyint;
19464 break;
19465 default:
19466 llvm_unreachable("unexpected builtin ID");
19468 Value *Value = EmitScalarExpr(E->getArg(0));
19469 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
19470 return Builder.CreateCall(Callee, Value);
19472 case WebAssembly::BI__builtin_wasm_ref_null_extern: {
19473 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_extern);
19474 return Builder.CreateCall(Callee);
19476 case WebAssembly::BI__builtin_wasm_ref_null_func: {
19477 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
19478 return Builder.CreateCall(Callee);
19480 case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
19481 Value *Src = EmitScalarExpr(E->getArg(0));
19482 Value *Indices = EmitScalarExpr(E->getArg(1));
19483 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
19484 return Builder.CreateCall(Callee, {Src, Indices});
19486 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
19487 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
19488 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
19489 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
19490 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
19491 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
19492 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
19493 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
19494 unsigned IntNo;
19495 switch (BuiltinID) {
19496 case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
19497 case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
19498 IntNo = Intrinsic::sadd_sat;
19499 break;
19500 case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
19501 case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
19502 IntNo = Intrinsic::uadd_sat;
19503 break;
19504 case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
19505 case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
19506 IntNo = Intrinsic::wasm_sub_sat_signed;
19507 break;
19508 case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
19509 case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
19510 IntNo = Intrinsic::wasm_sub_sat_unsigned;
19511 break;
19512 default:
19513 llvm_unreachable("unexpected builtin ID");
19515 Value *LHS = EmitScalarExpr(E->getArg(0));
19516 Value *RHS = EmitScalarExpr(E->getArg(1));
19517 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
19518 return Builder.CreateCall(Callee, {LHS, RHS});
19520 case WebAssembly::BI__builtin_wasm_abs_i8x16:
19521 case WebAssembly::BI__builtin_wasm_abs_i16x8:
19522 case WebAssembly::BI__builtin_wasm_abs_i32x4:
19523 case WebAssembly::BI__builtin_wasm_abs_i64x2: {
19524 Value *Vec = EmitScalarExpr(E->getArg(0));
19525 Value *Neg = Builder.CreateNeg(Vec, "neg");
19526 Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
19527 Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
19528 return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
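// Lane-wise integer abs is open-coded as select(x < 0, -x, x) rather than
// emitted as an intrinsic call.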
19530 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
19531 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
19532 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
19533 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
19534 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
19535 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
19536 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
19537 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
19538 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
19539 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
19540 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
19541 case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
19542 Value *LHS = EmitScalarExpr(E->getArg(0));
19543 Value *RHS = EmitScalarExpr(E->getArg(1));
19544 Value *ICmp;
19545 switch (BuiltinID) {
19546 case WebAssembly::BI__builtin_wasm_min_s_i8x16:
19547 case WebAssembly::BI__builtin_wasm_min_s_i16x8:
19548 case WebAssembly::BI__builtin_wasm_min_s_i32x4:
19549 ICmp = Builder.CreateICmpSLT(LHS, RHS);
19550 break;
19551 case WebAssembly::BI__builtin_wasm_min_u_i8x16:
19552 case WebAssembly::BI__builtin_wasm_min_u_i16x8:
19553 case WebAssembly::BI__builtin_wasm_min_u_i32x4:
19554 ICmp = Builder.CreateICmpULT(LHS, RHS);
19555 break;
19556 case WebAssembly::BI__builtin_wasm_max_s_i8x16:
19557 case WebAssembly::BI__builtin_wasm_max_s_i16x8:
19558 case WebAssembly::BI__builtin_wasm_max_s_i32x4:
19559 ICmp = Builder.CreateICmpSGT(LHS, RHS);
19560 break;
19561 case WebAssembly::BI__builtin_wasm_max_u_i8x16:
19562 case WebAssembly::BI__builtin_wasm_max_u_i16x8:
19563 case WebAssembly::BI__builtin_wasm_max_u_i32x4:
19564 ICmp = Builder.CreateICmpUGT(LHS, RHS);
19565 break;
19566 default:
19567 llvm_unreachable("unexpected builtin ID");
19569 return Builder.CreateSelect(ICmp, LHS, RHS);
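// Likewise, the integer min/max builtins are open-coded as a compare with
// the appropriate signed/unsigned predicate followed by a select.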
19571 case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
19572 case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
19573 Value *LHS = EmitScalarExpr(E->getArg(0));
19574 Value *RHS = EmitScalarExpr(E->getArg(1));
19575 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
19576 ConvertType(E->getType()));
19577 return Builder.CreateCall(Callee, {LHS, RHS});
19579 case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
19580 Value *LHS = EmitScalarExpr(E->getArg(0));
19581 Value *RHS = EmitScalarExpr(E->getArg(1));
19582 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
19583 return Builder.CreateCall(Callee, {LHS, RHS});
19585 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
19586 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
19587 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
19588 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
19589 Value *Vec = EmitScalarExpr(E->getArg(0));
19590 unsigned IntNo;
19591 switch (BuiltinID) {
19592 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
19593 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
19594 IntNo = Intrinsic::wasm_extadd_pairwise_signed;
19595 break;
19596 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
19597 case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
19598 IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
19599 break;
19600 default:
19601 llvm_unreachable("unexpected builtin ID");
19604 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
19605 return Builder.CreateCall(Callee, Vec);
19607 case WebAssembly::BI__builtin_wasm_bitselect: {
19608 Value *V1 = EmitScalarExpr(E->getArg(0));
19609 Value *V2 = EmitScalarExpr(E->getArg(1));
19610 Value *C = EmitScalarExpr(E->getArg(2));
19611 Function *Callee =
19612 CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
19613 return Builder.CreateCall(Callee, {V1, V2, C});
19615 case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
19616 Value *LHS = EmitScalarExpr(E->getArg(0));
19617 Value *RHS = EmitScalarExpr(E->getArg(1));
19618 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
19619 return Builder.CreateCall(Callee, {LHS, RHS});
19621 case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
19622 Value *Vec = EmitScalarExpr(E->getArg(0));
19623 Function *Callee =
19624 CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
19625 return Builder.CreateCall(Callee, {Vec});
19627 case WebAssembly::BI__builtin_wasm_any_true_v128:
19628 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
19629 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
19630 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
19631 case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
19632 unsigned IntNo;
19633 switch (BuiltinID) {
19634 case WebAssembly::BI__builtin_wasm_any_true_v128:
19635 IntNo = Intrinsic::wasm_anytrue;
19636 break;
19637 case WebAssembly::BI__builtin_wasm_all_true_i8x16:
19638 case WebAssembly::BI__builtin_wasm_all_true_i16x8:
19639 case WebAssembly::BI__builtin_wasm_all_true_i32x4:
19640 case WebAssembly::BI__builtin_wasm_all_true_i64x2:
19641 IntNo = Intrinsic::wasm_alltrue;
19642 break;
19643 default:
19644 llvm_unreachable("unexpected builtin ID");
19646 Value *Vec = EmitScalarExpr(E->getArg(0));
19647 Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
19648 return Builder.CreateCall(Callee, {Vec});
19650 case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
19651 case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
19652 case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
19653 case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
19654 Value *Vec = EmitScalarExpr(E->getArg(0));
19655 Function *Callee =
19656 CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
19657 return Builder.CreateCall(Callee, {Vec});
19659 case WebAssembly::BI__builtin_wasm_abs_f32x4:
19660 case WebAssembly::BI__builtin_wasm_abs_f64x2: {
19661 Value *Vec = EmitScalarExpr(E->getArg(0));
19662 Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
19663 return Builder.CreateCall(Callee, {Vec});
19665 case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
19666 case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
19667 Value *Vec = EmitScalarExpr(E->getArg(0));
19668 Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
19669 return Builder.CreateCall(Callee, {Vec});
19671 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
19672 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
19673 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
19674 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
19675 Value *Low = EmitScalarExpr(E->getArg(0));
19676 Value *High = EmitScalarExpr(E->getArg(1));
19677 unsigned IntNo;
19678 switch (BuiltinID) {
19679 case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
19680 case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
19681 IntNo = Intrinsic::wasm_narrow_signed;
19682 break;
19683 case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
19684 case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
19685 IntNo = Intrinsic::wasm_narrow_unsigned;
19686 break;
19687 default:
19688 llvm_unreachable("unexpected builtin ID");
19690 Function *Callee =
19691 CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
19692 return Builder.CreateCall(Callee, {Low, High});
19694 case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
19695 case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: {
19696 Value *Vec = EmitScalarExpr(E->getArg(0));
19697 unsigned IntNo;
19698 switch (BuiltinID) {
19699 case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
19700 IntNo = Intrinsic::fptosi_sat;
19701 break;
19702 case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4:
19703 IntNo = Intrinsic::fptoui_sat;
19704 break;
19705 default:
19706 llvm_unreachable("unexpected builtin ID");
19708 llvm::Type *SrcT = Vec->getType();
19709 llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
19710 Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
19711 Value *Trunc = Builder.CreateCall(Callee, Vec);
19712 Value *Splat = Constant::getNullValue(TruncT);
19713 return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
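// The f64x2 -> i32x4 "zero" variants truncate to a two-lane i32 vector and
// then shuffle with an all-zero vector so the result has four lanes, with
// the upper two lanes zero.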
19715 case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
19716 Value *Ops[18];
19717 size_t OpIdx = 0;
19718 Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
19719 Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
19720 while (OpIdx < 18) {
19721 std::optional<llvm::APSInt> LaneConst =
19722 E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
19723 assert(LaneConst && "Constant arg isn't actually constant?");
19724 Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
19726 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
19727 return Builder.CreateCall(Callee, Ops);
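// All 16 lane indices must be integer constant expressions, e.g. (sketch)
//   __builtin_wasm_shuffle_i8x16(a, b, 0, 1, 2, 3, 4, 5, 6, 7,
//                                8, 9, 10, 11, 12, 13, 14, 15);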
19729 case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
19730 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
19731 case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
19732 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: {
19733 Value *A = EmitScalarExpr(E->getArg(0));
19734 Value *B = EmitScalarExpr(E->getArg(1));
19735 Value *C = EmitScalarExpr(E->getArg(2));
19736 unsigned IntNo;
19737 switch (BuiltinID) {
19738 case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
19739 case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
19740 IntNo = Intrinsic::wasm_relaxed_madd;
19741 break;
19742 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
19743 case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2:
19744 IntNo = Intrinsic::wasm_relaxed_nmadd;
19745 break;
19746 default:
19747 llvm_unreachable("unexpected builtin ID");
19749 Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
19750 return Builder.CreateCall(Callee, {A, B, C});
19752 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i8x16:
19753 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i16x8:
19754 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i32x4:
19755 case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i64x2: {
19756 Value *A = EmitScalarExpr(E->getArg(0));
19757 Value *B = EmitScalarExpr(E->getArg(1));
19758 Value *C = EmitScalarExpr(E->getArg(2));
19759 Function *Callee =
19760 CGM.getIntrinsic(Intrinsic::wasm_relaxed_laneselect, A->getType());
19761 return Builder.CreateCall(Callee, {A, B, C});
19763 case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
19764 Value *Src = EmitScalarExpr(E->getArg(0));
19765 Value *Indices = EmitScalarExpr(E->getArg(1));
19766 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
19767 return Builder.CreateCall(Callee, {Src, Indices});
19769 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
19770 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
19771 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
19772 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
19773 Value *LHS = EmitScalarExpr(E->getArg(0));
19774 Value *RHS = EmitScalarExpr(E->getArg(1));
19775 unsigned IntNo;
19776 switch (BuiltinID) {
19777 case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
19778 case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
19779 IntNo = Intrinsic::wasm_relaxed_min;
19780 break;
19781 case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
19782 case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
19783 IntNo = Intrinsic::wasm_relaxed_max;
19784 break;
19785 default:
19786 llvm_unreachable("unexpected builtin ID");
19788 Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
19789 return Builder.CreateCall(Callee, {LHS, RHS});
19791 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
19792 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
19793 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
19794 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: {
19795 Value *Vec = EmitScalarExpr(E->getArg(0));
19796 unsigned IntNo;
19797 switch (BuiltinID) {
19798 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
19799 IntNo = Intrinsic::wasm_relaxed_trunc_signed;
19800 break;
19801 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
19802 IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
19803 break;
19804 case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
19805 IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero;
19806 break;
19807 case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2:
19808 IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero;
19809 break;
19810 default:
19811 llvm_unreachable("unexpected builtin ID");
19813 Function *Callee = CGM.getIntrinsic(IntNo);
19814 return Builder.CreateCall(Callee, {Vec});
19816 case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
19817 Value *LHS = EmitScalarExpr(E->getArg(0));
19818 Value *RHS = EmitScalarExpr(E->getArg(1));
19819 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed);
19820 return Builder.CreateCall(Callee, {LHS, RHS});
19822 case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8: {
19823 Value *LHS = EmitScalarExpr(E->getArg(0));
19824 Value *RHS = EmitScalarExpr(E->getArg(1));
19825 Function *Callee =
19826 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed);
19827 return Builder.CreateCall(Callee, {LHS, RHS});
19829 case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4: {
19830 Value *LHS = EmitScalarExpr(E->getArg(0));
19831 Value *RHS = EmitScalarExpr(E->getArg(1));
19832 Value *Acc = EmitScalarExpr(E->getArg(2));
19833 Function *Callee =
19834 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed);
19835 return Builder.CreateCall(Callee, {LHS, RHS, Acc});
19837 case WebAssembly::BI__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4: {
19838 Value *LHS = EmitScalarExpr(E->getArg(0));
19839 Value *RHS = EmitScalarExpr(E->getArg(1));
19840 Value *Acc = EmitScalarExpr(E->getArg(2));
19841 Function *Callee =
19842 CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
19843 return Builder.CreateCall(Callee, {LHS, RHS, Acc});
19845 case WebAssembly::BI__builtin_wasm_table_get: {
19846 assert(E->getArg(0)->getType()->isArrayType());
19847 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19848 Value *Index = EmitScalarExpr(E->getArg(1));
19849 Function *Callee;
19850 if (E->getType().isWebAssemblyExternrefType())
19851 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_externref);
19852 else if (E->getType().isWebAssemblyFuncrefType())
19853 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_funcref);
19854 else
19855 llvm_unreachable(
19856 "Unexpected reference type for __builtin_wasm_table_get");
19857 return Builder.CreateCall(Callee, {Table, Index});
19859 case WebAssembly::BI__builtin_wasm_table_set: {
19860 assert(E->getArg(0)->getType()->isArrayType());
19861 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19862 Value *Index = EmitScalarExpr(E->getArg(1));
19863 Value *Val = EmitScalarExpr(E->getArg(2));
19864 Function *Callee;
19865 if (E->getArg(2)->getType().isWebAssemblyExternrefType())
19866 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_externref);
19867 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
19868 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_funcref);
19869 else
19870 llvm_unreachable(
19871 "Unexpected reference type for __builtin_wasm_table_set");
19872 return Builder.CreateCall(Callee, {Table, Index, Val});
19874 case WebAssembly::BI__builtin_wasm_table_size: {
19875 assert(E->getArg(0)->getType()->isArrayType());
19876 Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19877 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
19878 return Builder.CreateCall(Callee, Value);
19880 case WebAssembly::BI__builtin_wasm_table_grow: {
19881 assert(E->getArg(0)->getType()->isArrayType());
19882 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19883 Value *Val = EmitScalarExpr(E->getArg(1));
19884 Value *NElems = EmitScalarExpr(E->getArg(2));
19886 Function *Callee;
19887 if (E->getArg(1)->getType().isWebAssemblyExternrefType())
19888 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_grow_externref);
19889 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
19890 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
19891 else
19892 llvm_unreachable(
19893 "Unexpected reference type for __builtin_wasm_table_grow");
19895 return Builder.CreateCall(Callee, {Table, Val, NElems});
19897 case WebAssembly::BI__builtin_wasm_table_fill: {
19898 assert(E->getArg(0)->getType()->isArrayType());
19899 Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19900 Value *Index = EmitScalarExpr(E->getArg(1));
19901 Value *Val = EmitScalarExpr(E->getArg(2));
19902 Value *NElems = EmitScalarExpr(E->getArg(3));
19904 Function *Callee;
19905 if (E->getArg(2)->getType().isWebAssemblyExternrefType())
19906 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_externref);
19907 else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
19908 Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
19909 else
19910 llvm_unreachable(
19911 "Unexpected reference type for __builtin_wasm_table_fill");
19913 return Builder.CreateCall(Callee, {Table, Index, Val, NElems});
19915 case WebAssembly::BI__builtin_wasm_table_copy: {
19916 assert(E->getArg(0)->getType()->isArrayType());
19917 Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
19918 Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
19919 Value *DstIdx = EmitScalarExpr(E->getArg(2));
19920 Value *SrcIdx = EmitScalarExpr(E->getArg(3));
19921 Value *NElems = EmitScalarExpr(E->getArg(4));
19923 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_copy);
19925 return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});
19927 default:
19928 return nullptr;
19932 static std::pair<Intrinsic::ID, unsigned>
19933 getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {
19934 struct Info {
19935 unsigned BuiltinID;
19936 Intrinsic::ID IntrinsicID;
19937 unsigned VecLen;
19939 static Info Infos[] = {
19940 #define CUSTOM_BUILTIN_MAPPING(x,s) \
19941 { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
19942 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
19943 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
19944 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
19945 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
19946 CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
19947 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
19948 CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
19949 CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
19950 CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
19951 CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
19952 CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
19953 CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
19954 CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
19955 CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
19956 CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
19957 CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
19958 CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
19959 CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
19960 CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
19961 CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
19962 CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
19963 CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
19964 // Legacy builtins that take a vector in place of a vector predicate.
19965 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
19966 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
19967 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
19968 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
19969 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
19970 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
19971 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
19972 CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
19973 #include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
19974 #undef CUSTOM_BUILTIN_MAPPING
19977 auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
19978 static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
19979 (void)SortOnce;
19981 const Info *F = llvm::lower_bound(Infos, Info{BuiltinID, 0, 0}, CmpInfo);
19982 if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
19983 return {Intrinsic::not_intrinsic, 0};
19985 return {F->IntrinsicID, F->VecLen};
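// The table is sorted exactly once via the function-local static above
// (initialized in a thread-safe way in C++11 and later) so that
// llvm::lower_bound can binary-search it on every call.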
19988 Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
19989 const CallExpr *E) {
19990 Intrinsic::ID ID;
19991 unsigned VecLen;
19992 std::tie(ID, VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID);
19994 auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
19995 // The base pointer is passed by address, so it needs to be loaded.
19996 Address A = EmitPointerWithAlignment(E->getArg(0));
19997 Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
19998 llvm::Value *Base = Builder.CreateLoad(BP);
19999 // The treatment of both loads and stores is the same: the arguments for
20000 // the builtin are the same as the arguments for the intrinsic.
20001 // Load:
20002 // builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
20003 // builtin(Base, Mod, Start) -> intr(Base, Mod, Start)
20004 // Store:
20005 // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
20006 // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start)
20007 SmallVector<llvm::Value*,5> Ops = { Base };
20008 for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
20009 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20011 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
20012 // The load intrinsics generate two results (Value, NewBase), stores
20013 // generate one (NewBase). The new base address needs to be stored.
20014 llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
20015 : Result;
20016 llvm::Value *LV = EmitScalarExpr(E->getArg(0));
20017 Address Dest = EmitPointerWithAlignment(E->getArg(0));
20018 llvm::Value *RetVal =
20019 Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
20020 if (IsLoad)
20021 RetVal = Builder.CreateExtractValue(Result, 0);
20022 return RetVal;
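// For the circular-load forms the loaded element is returned and the
// post-incremented base is written back through the first argument; the
// store forms likewise write back the updated base.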
20025 // Handle the conversion of bit-reverse load intrinsics to bit code.
20026 // The intrinsic call after this function only reads from memory; the
20027 // write to memory is handled by the store instruction.
20028 auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
20029 // At the source level the builtin returns the updated base pointer; the
20030 // loaded value is returned to the caller through the second argument,
20031 // which is a pointer, so the value produced by the intrinsic is stored there.
20032 llvm::Value *BaseAddress = EmitScalarExpr(E->getArg(0));
20034 // Expressions like &(*pt++) are incremented once per evaluation;
20035 // EmitPointerWithAlignment and EmitScalarExpr each evaluate the expression
20036 // once per call, so the argument must be emitted only once.
20037 Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
20038 DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
20039 llvm::Value *DestAddress = DestAddr.getPointer();
20041 // Operands are Base, Dest, Modifier.
20042 // The intrinsic format in LLVM IR is defined as
20043 // { ValueType, i8* } (i8*, i32).
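// For example, __builtin_brev_ldh maps to llvm.hexagon.L2.loadrh.pbr (as
// selected by the BI__builtin_brev_ldh case below), which returns
// { i32, i8* }; the i32 element is truncated to i16 before being stored to
// the destination, and the updated base pointer is returned.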
20044 llvm::Value *Result = Builder.CreateCall(
20045 CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});
20047 // The value needs to be stored as the variable is passed by reference.
20048 llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
20050 // The stored value needs to be truncated to fit the destination type.
20051 // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
20052 // to be handled with stores of the respective destination type.
20053 DestVal = Builder.CreateTrunc(DestVal, DestTy);
20055 Builder.CreateAlignedStore(DestVal, DestAddress, DestAddr.getAlignment());
20056 // The updated value of the base pointer is returned.
20057 return Builder.CreateExtractValue(Result, 1);
20058 };
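// V2Q and Q2V convert between an HVX byte vector and a vector predicate by
// and-ing with an all-ones scalar (-1) via vandvrt/vandqrt, using the _128B
// variants when VecLen is 128.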
20060 auto V2Q = [this, VecLen] (llvm::Value *Vec) {
20061 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
20062 : Intrinsic::hexagon_V6_vandvrt;
20063 return Builder.CreateCall(CGM.getIntrinsic(ID),
20064 {Vec, Builder.getInt32(-1)});
20065 };
20066 auto Q2V = [this, VecLen] (llvm::Value *Pred) {
20067 Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
20068 : Intrinsic::hexagon_V6_vandqrt;
20069 return Builder.CreateCall(CGM.getIntrinsic(ID),
20070 {Pred, Builder.getInt32(-1)});
20071 };
20073 switch (BuiltinID) {
20074 // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
20075 // and the corresponding C/C++ builtins use loads/stores to update
20076 // the predicate.
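// For example (illustrative prototype; the actual signature comes from the
// builtin definition):
//   HVX_Vector __builtin_HEXAGON_V6_vaddcarry(HVX_Vector A, HVX_Vector B,
//                                             HVX_VectorPred *Carry);
// where *Carry supplies the carry-in and receives the carry-out.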
20077 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
20078 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
20079 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
20080 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
20081 // Get the type from the 0-th argument.
20082 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
20083 Address PredAddr =
20084 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
20085 llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
20086 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
20087 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
20089 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
20090 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
20091 PredAddr.getAlignment());
20092 return Builder.CreateExtractValue(Result, 0);
20093 }
20094 // These are identical to the builtins above, except they don't consume
20095 // an input carry, only generate a carry-out. Since they still produce two
20096 // outputs, emit the store of the predicate, but no load.
20097 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo:
20098 case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo_128B:
20099 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo:
20100 case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: {
20101 // Get the type from the 0-th argument.
20102 llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
20103 Address PredAddr =
20104 EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
20105 llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
20106 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
20108 llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
20109 Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
20110 PredAddr.getAlignment());
20111 return Builder.CreateExtractValue(Result, 0);
20112 }
20114 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
20115 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
20116 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
20117 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
20118 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
20119 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
20120 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
20121 case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
20122 SmallVector<llvm::Value*,4> Ops;
20123 const Expr *PredOp = E->getArg(0);
20124 // There will be an implicit cast to a boolean vector. Strip it.
20125 if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
20126 if (Cast->getCastKind() == CK_BitCast)
20127 PredOp = Cast->getSubExpr();
20128 Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
20129 }
20130 for (int i = 1, e = E->getNumArgs(); i != e; ++i)
20131 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20132 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
20133 }
20135 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
20136 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
20137 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
20138 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
20139 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
20140 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
20141 case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
20142 case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
20143 case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
20144 case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
20145 case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
20146 case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
20147 return MakeCircOp(ID, /*IsLoad=*/true);
20148 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
20149 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
20150 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
20151 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
20152 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
20153 case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
20154 case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
20155 case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
20156 case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
20157 case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
20158 return MakeCircOp(ID, /*IsLoad=*/false);
20159 case Hexagon::BI__builtin_brev_ldub:
20160 return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
20161 case Hexagon::BI__builtin_brev_ldb:
20162 return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
20163 case Hexagon::BI__builtin_brev_lduh:
20164 return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
20165 case Hexagon::BI__builtin_brev_ldh:
20166 return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
20167 case Hexagon::BI__builtin_brev_ldw:
20168 return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
20169 case Hexagon::BI__builtin_brev_ldd:
20170 return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
20171 } // switch
20173 return nullptr;
20174 }
20176 Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
20177 const CallExpr *E,
20178 ReturnValueSlot ReturnValue) {
20179 SmallVector<Value *, 4> Ops;
20180 llvm::Type *ResultType = ConvertType(E->getType());
20182 // Find out if any arguments are required to be integer constant expressions.
20183 unsigned ICEArguments = 0;
20184 ASTContext::GetBuiltinTypeError Error;
20185 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
20186 if (Error == ASTContext::GE_Missing_type) {
20187 // Vector intrinsics don't have a type string.
20188 assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
20189 BuiltinID <= clang::RISCV::LastRVVBuiltin);
20190 ICEArguments = 0;
20191 if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
20192 BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
20193 ICEArguments = 1 << 1;
20194 } else {
20195 assert(Error == ASTContext::GE_None && "Unexpected error");
20196 }
20198 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
20199 ICEArguments |= (1 << 1);
20200 if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
20201 ICEArguments |= (1 << 2);
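// A set bit i in ICEArguments means argument i must be an integer constant
// expression; such arguments are folded to ConstantInts in the loop below.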
20203 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
20204 // Handle aggregate arguments, namely RVV tuple types in segment loads/stores.
20205 if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
20206 LValue L = EmitAggExprToLValue(E->getArg(i));
20207 llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
20208 Ops.push_back(AggValue);
20209 continue;
20210 }
20212 // If this is a normal argument, just emit it as a scalar.
20213 if ((ICEArguments & (1 << i)) == 0) {
20214 Ops.push_back(EmitScalarExpr(E->getArg(i)));
20215 continue;
20216 }
20218 // If this is required to be a constant, constant fold it so that we know
20219 // that the generated intrinsic gets a ConstantInt.
20220 Ops.push_back(llvm::ConstantInt::get(
20221 getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
20222 }
20224 Intrinsic::ID ID = Intrinsic::not_intrinsic;
20225 unsigned NF = 1;
20226 // The 0th bit simulates the `vta` of RVV
20227 // The 1st bit simulates the `vma` of RVV
20228 constexpr unsigned RVV_VTA = 0x1;
20229 constexpr unsigned RVV_VMA = 0x2;
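// For example, PolicyAttrs == (RVV_VTA | RVV_VMA) corresponds to a
// tail-agnostic, mask-agnostic policy, while 0 means tail-undisturbed and
// mask-undisturbed.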
20230 int PolicyAttrs = 0;
20231 bool IsMasked = false;
20233 // Required for overloaded intrinsics.
20234 llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
20235 switch (BuiltinID) {
20236 default: llvm_unreachable("unexpected builtin ID");
20237 case RISCV::BI__builtin_riscv_orc_b_32:
20238 case RISCV::BI__builtin_riscv_orc_b_64:
20239 case RISCV::BI__builtin_riscv_clz_32:
20240 case RISCV::BI__builtin_riscv_clz_64:
20241 case RISCV::BI__builtin_riscv_ctz_32:
20242 case RISCV::BI__builtin_riscv_ctz_64:
20243 case RISCV::BI__builtin_riscv_clmul_32:
20244 case RISCV::BI__builtin_riscv_clmul_64:
20245 case RISCV::BI__builtin_riscv_clmulh_32:
20246 case RISCV::BI__builtin_riscv_clmulh_64:
20247 case RISCV::BI__builtin_riscv_clmulr_32:
20248 case RISCV::BI__builtin_riscv_clmulr_64:
20249 case RISCV::BI__builtin_riscv_xperm4_32:
20250 case RISCV::BI__builtin_riscv_xperm4_64:
20251 case RISCV::BI__builtin_riscv_xperm8_32:
20252 case RISCV::BI__builtin_riscv_xperm8_64:
20253 case RISCV::BI__builtin_riscv_brev8_32:
20254 case RISCV::BI__builtin_riscv_brev8_64:
20255 case RISCV::BI__builtin_riscv_zip_32:
20256 case RISCV::BI__builtin_riscv_unzip_32: {
20257 switch (BuiltinID) {
20258 default: llvm_unreachable("unexpected builtin ID");
20259 // Zbb
20260 case RISCV::BI__builtin_riscv_orc_b_32:
20261 case RISCV::BI__builtin_riscv_orc_b_64:
20262 ID = Intrinsic::riscv_orc_b;
20263 break;
20264 case RISCV::BI__builtin_riscv_clz_32:
20265 case RISCV::BI__builtin_riscv_clz_64: {
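// Lowered to llvm.ctlz with is_zero_poison == false (the i1 false operand
// below), so a zero input yields the operand's bit width instead of poison.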
20266 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
20267 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
20268 if (Result->getType() != ResultType)
20269 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
20270 "cast");
20271 return Result;
20272 }
20273 case RISCV::BI__builtin_riscv_ctz_32:
20274 case RISCV::BI__builtin_riscv_ctz_64: {
20275 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
20276 Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
20277 if (Result->getType() != ResultType)
20278 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
20279 "cast");
20280 return Result;
20281 }
20283 // Zbc
20284 case RISCV::BI__builtin_riscv_clmul_32:
20285 case RISCV::BI__builtin_riscv_clmul_64:
20286 ID = Intrinsic::riscv_clmul;
20287 break;
20288 case RISCV::BI__builtin_riscv_clmulh_32:
20289 case RISCV::BI__builtin_riscv_clmulh_64:
20290 ID = Intrinsic::riscv_clmulh;
20291 break;
20292 case RISCV::BI__builtin_riscv_clmulr_32:
20293 case RISCV::BI__builtin_riscv_clmulr_64:
20294 ID = Intrinsic::riscv_clmulr;
20295 break;
20297 // Zbkx
20298 case RISCV::BI__builtin_riscv_xperm8_32:
20299 case RISCV::BI__builtin_riscv_xperm8_64:
20300 ID = Intrinsic::riscv_xperm8;
20301 break;
20302 case RISCV::BI__builtin_riscv_xperm4_32:
20303 case RISCV::BI__builtin_riscv_xperm4_64:
20304 ID = Intrinsic::riscv_xperm4;
20305 break;
20307 // Zbkb
20308 case RISCV::BI__builtin_riscv_brev8_32:
20309 case RISCV::BI__builtin_riscv_brev8_64:
20310 ID = Intrinsic::riscv_brev8;
20311 break;
20312 case RISCV::BI__builtin_riscv_zip_32:
20313 ID = Intrinsic::riscv_zip;
20314 break;
20315 case RISCV::BI__builtin_riscv_unzip_32:
20316 ID = Intrinsic::riscv_unzip;
20317 break;
20318 }
20320 IntrinsicTypes = {ResultType};
20321 break;
20322 }
20324 // Zk builtins
20326 // Zknh
20327 case RISCV::BI__builtin_riscv_sha256sig0:
20328 ID = Intrinsic::riscv_sha256sig0;
20329 break;
20330 case RISCV::BI__builtin_riscv_sha256sig1:
20331 ID = Intrinsic::riscv_sha256sig1;
20332 break;
20333 case RISCV::BI__builtin_riscv_sha256sum0:
20334 ID = Intrinsic::riscv_sha256sum0;
20335 break;
20336 case RISCV::BI__builtin_riscv_sha256sum1:
20337 ID = Intrinsic::riscv_sha256sum1;
20338 break;
20340 // Zksed
20341 case RISCV::BI__builtin_riscv_sm4ks:
20342 ID = Intrinsic::riscv_sm4ks;
20343 break;
20344 case RISCV::BI__builtin_riscv_sm4ed:
20345 ID = Intrinsic::riscv_sm4ed;
20346 break;
20348 // Zksh
20349 case RISCV::BI__builtin_riscv_sm3p0:
20350 ID = Intrinsic::riscv_sm3p0;
20351 break;
20352 case RISCV::BI__builtin_riscv_sm3p1:
20353 ID = Intrinsic::riscv_sm3p1;
20354 break;
20356 // Zihintntl
20357 case RISCV::BI__builtin_riscv_ntl_load: {
20358 llvm::Type *ResTy = ConvertType(E->getType());
20359 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
20360 if (Ops.size() == 2)
20361 DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
20363 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
20364 getLLVMContext(),
20365 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
20366 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
20367 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
20369 int Width;
20370 if (ResTy->isScalableTy()) {
20371 const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
20372 llvm::Type *ScalarTy = ResTy->getScalarType();
20373 Width = ScalarTy->getPrimitiveSizeInBits() *
20374 SVTy->getElementCount().getKnownMinValue();
20375 } else
20376 Width = ResTy->getPrimitiveSizeInBits();
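// For example (illustrative), a <vscale x 4 x i32> result gives
// Width = 32 * 4 = 128 bits, i.e. a 16-byte minimum alignment for the load.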
20377 LoadInst *Load = Builder.CreateLoad(
20378 Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
20380 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
20381 Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
20382 RISCVDomainNode);
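// The emitted load then carries both annotations, roughly (illustrative IR):
//   %v = load i32, ptr %p, align 4, !nontemporal !n, !riscv-nontemporal-domain !d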
20384 return Load;
20385 }
20386 case RISCV::BI__builtin_riscv_ntl_store: {
20387 unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
20388 if (Ops.size() == 3)
20389 DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
20391 llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
20392 getLLVMContext(),
20393 llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
20394 llvm::MDNode *NontemporalNode = llvm::MDNode::get(
20395 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
20397 Value *BC = Builder.CreateBitCast(
20398 Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType()), "cast");
20400 StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], BC);
20401 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
20402 Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
20403 RISCVDomainNode);
20405 return Store;
20406 }
20408 // Vector builtins are handled from here.
20409 #include "clang/Basic/riscv_vector_builtin_cg.inc"
20410 // SiFive Vector builtins are handled from here.
20411 #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
20412 }
20414 assert(ID != Intrinsic::not_intrinsic);
20416 llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
20417 return Builder.CreateCall(F, Ops, "");