//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"

using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
    else
      return false;
  }
  return Overflow;
}
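
// Illustration (comment added for exposition, not part of the upstream file):
// for 'int' constants INT_MAX and 1 with BO_Add, sadd_ov() sets Overflow to
// true, so the helper reports a possible overflow; for 1 + 2 it computes
// Result = 3 with Overflow == false.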

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations return
    // an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
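
// Worked example (illustration, not part of the upstream file): for
// 'unsigned char a, b; a * b', both operands are promoted to 32-bit int and
// 2 * 8 < 32, so the product (at most 255 * 255 = 65025) always fits and the
// check is elided. For 'unsigned short' operands, 2 * 16 is not < 32
// (65535 * 65535 overflows int), so the overflow check must stay.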

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira = false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }

  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot(const UnaryOperator *E);
  Value *VisitUnaryLNot(const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }

  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr(const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
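
  // For instance (illustration, not part of the upstream file),
  // HANDLEBINOP(Mul) expands to a VisitBinMul(const BinaryOperator *) that
  // forwards to EmitMul() via EmitBinOps(), plus a
  // VisitBinMulAssign(const CompoundAssignOperator *) that forwards to
  // EmitCompoundAssign(E, &ScalarExprEmitter::EmitMul).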

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
  Value *VisitBin##CODE(const BinaryOperator *E) { \
    return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                       llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP

  Value *VisitBinAssign(const BinaryOperator *E);

  Value *VisitBinLAnd(const BinaryOperator *E);
  Value *VisitBinLOr(const BinaryOperator *E);
  Value *VisitBinComma(const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // towards zero).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
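
// Worked example (illustration, not part of the upstream file): for a
// conversion from 'float' to 'signed char', Min = -128 and Max = 127; after
// the +/-1 adjustments the emitted guard is 'Src > -129.0 && Src < 128.0',
// which accepts exactly the values whose truncation toward zero lands in
// [-128, 127] and rejects +/-Inf and NaN (the ordered comparisons are false
// on NaN).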

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(Src != Dst);
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(Kind, std::make_pair(Check, Mask));
}
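
// Illustration (not part of the upstream file): when truncating i32 %x to
// i8 %t, the check extends %t back to the source width and compares:
//   %t         = trunc i32 %x to i8
//   %anyext    = sext i8 %t to i32    ; zext when the destination is unsigned
//   %truncheck = icmp eq i32 %anyext, %x
// 'false' means bits were discarded and the sanitizer handler is invoked.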

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}
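
// Illustration (not part of the upstream file): converting the signed i8
// value -1 to an unsigned 8-bit type yields 255; the source negativity test
// is true while the destination test is constant-false (an unsigned value is
// never negative), so the icmp eq produces 'false' and the sign change is
// reported.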

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases
  //   1. If Src and Dst have the same signedness and size
  //   2. If both are unsigned, a sign check is unnecessary!
  //   3. If Dst is signed and bigger than Src, either
  //      sign-extension or zero-extension will make sure
  //      the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
            {Src, Dst});
}
*ScalarExprEmitter::EmitScalarCast(Value
*Src
, QualType SrcType
,
1392 QualType DstType
, llvm::Type
*SrcTy
,
1394 ScalarConversionOpts Opts
) {
1395 // The Element types determine the type of cast to perform.
1396 llvm::Type
*SrcElementTy
;
1397 llvm::Type
*DstElementTy
;
1398 QualType SrcElementType
;
1399 QualType DstElementType
;
1400 if (SrcType
->isMatrixType() && DstType
->isMatrixType()) {
1401 SrcElementTy
= cast
<llvm::VectorType
>(SrcTy
)->getElementType();
1402 DstElementTy
= cast
<llvm::VectorType
>(DstTy
)->getElementType();
1403 SrcElementType
= SrcType
->castAs
<MatrixType
>()->getElementType();
1404 DstElementType
= DstType
->castAs
<MatrixType
>()->getElementType();
1406 assert(!SrcType
->isMatrixType() && !DstType
->isMatrixType() &&
1407 "cannot cast between matrix and non-matrix types");
1408 SrcElementTy
= SrcTy
;
1409 DstElementTy
= DstTy
;
1410 SrcElementType
= SrcType
;
1411 DstElementType
= DstType
;
1414 if (isa
<llvm::IntegerType
>(SrcElementTy
)) {
1415 bool InputSigned
= SrcElementType
->isSignedIntegerOrEnumerationType();
1416 if (SrcElementType
->isBooleanType() && Opts
.TreatBooleanAsSigned
) {
1420 if (isa
<llvm::IntegerType
>(DstElementTy
))
1421 return Builder
.CreateIntCast(Src
, DstTy
, InputSigned
, "conv");
1423 return Builder
.CreateSIToFP(Src
, DstTy
, "conv");
1424 return Builder
.CreateUIToFP(Src
, DstTy
, "conv");
1427 if (isa
<llvm::IntegerType
>(DstElementTy
)) {
1428 assert(SrcElementTy
->isFloatingPointTy() && "Unknown real conversion");
1429 bool IsSigned
= DstElementType
->isSignedIntegerOrEnumerationType();
1431 // If we can't recognize overflow as undefined behavior, assume that
1432 // overflow saturates. This protects against normal optimizations if we are
1433 // compiling with non-standard FP semantics.
1434 if (!CGF
.CGM
.getCodeGenOpts().StrictFloatCastOverflow
) {
1435 llvm::Intrinsic::ID IID
=
1436 IsSigned
? llvm::Intrinsic::fptosi_sat
: llvm::Intrinsic::fptoui_sat
;
1437 return Builder
.CreateCall(CGF
.CGM
.getIntrinsic(IID
, {DstTy
, SrcTy
}), Src
);
1441 return Builder
.CreateFPToSI(Src
, DstTy
, "conv");
1442 return Builder
.CreateFPToUI(Src
, DstTy
, "conv");
1445 if (DstElementTy
->getTypeID() < SrcElementTy
->getTypeID())
1446 return Builder
.CreateFPTrunc(Src
, DstTy
, "conv");
1447 return Builder
.CreateFPExt(Src
, DstTy
, "conv");

/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);
1503 llvm::Type
*DstTy
= ConvertType(DstType
);
  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }
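// For example, on a target where half is storage-only and the fp16 conversion
// intrinsics are required, a source like '__fp16 h; (int)h' is first widened
// here, roughly:
//   %0 = call float @llvm.convert.from.fp16.f32(i16 %h)
//   %conv = fptosi float %0 to i32
// With NativeHalfType in effect this block is skipped entirely.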
  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }
  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Src;

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }
  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }
  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);
  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                               CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(Src,
        CGF.getContext().getFixedPointSemantics(DstTy));
  else if (DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(Src,
        CGF.getContext().getFixedPointSemantics(SrcTy),
        ConvertType(DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}
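// Note: the scaling and saturation details live in llvm::FixedPointBuilder.
// A widening conversion such as 'short _Accum' -> '_Accum', for instance,
// reduces to a sign-extend plus a left shift by the difference in fractional
// bits, both derived from the FixedPointSemantics queried above.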
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
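// For example, 'if (z)' for a 'double _Complex z' takes the boolean path
// above and emits roughly
//   %r = fcmp une double %z.real, 0.0
//   %i = fcmp une double %z.imag, 0.0
//   %tobool = or i1 %r, %i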
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1)
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
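// For example, under -fsanitize=signed-integer-overflow an 'int' addition is
// emitted via llvm.sadd.with.overflow.i32; its overflow bit arrives here in
// Checks, and the source location plus type descriptor gathered above become
// the static argument block of the __ubsan_handle_add_overflow call emitted
// by CGF.EmitCheck.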
//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  unsigned AddrSpace =
      Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  llvm::Type *ExprTy = ConvertType(E->getType());
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}
Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
  assert(E->getDataElementCount() == 1);
  auto It = E->begin();
  return Builder.getInt((*It)->getValue());
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value* NewV = llvm::PoisonValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnes())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();

  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  }

  Expr::EvalResult Result;
  if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
    llvm::APSInt Value = Result.Val.getInt();
    CGF.EmitIgnoredExpr(E->getBase());
    return Builder.getInt(Value);
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }

  return Result;
}
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts.  For most cases, this just
  // loads the lvalue formed by the subscript expr.  However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      !E->getBase()->getType()->isSveVLSBuiltinType())
    return EmitLoadOfLValue(E);

  // Handle the vector case.  The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx  = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the vector case.  The base must be a vector, the index must be an
  // integer value.
  Value *RowIdx = Visit(E->getRowIdx());
  Value *ColumnIdx = Visit(E->getColumnIdx());

  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  unsigned NumRows = MatrixTy->getNumRows();
  llvm::MatrixBuilder MB(Builder);
  Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());

  Value *Matrix = Visit(E->getBase());

  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
  return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
}
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                      unsigned Off) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return -1;
  return Off + MV;
}

static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
  assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
         "Index operand too large for shufflevector mask!");
  return C->getZExtValue();
}
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType() == E->getType())
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr).  This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements.  If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }
    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }
  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}
// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case.  The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }
  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
        CGF.EmitVTablePtrCheckForCast(
            PT->getPointeeType(),
            Address(Src,
                    CGF.ConvertTypeForMem(
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }
    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it.  Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered.  It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }
    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              ScalableDstTy->getElementCount().getKnownMinValue() / 8);
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
          if (Result->getType() != DstTy)
            Result = Builder.CreateBitCast(Result, DstTy);
          return Result;
        }
      }
    }
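    // For example, casting a fixed <4 x float> to a scalable SVE vector type
    // (under the arm_sve_vector_bits attribute) takes the path above and
    // emits roughly
    //   %cast.scalable = call <vscale x 4 x float>
    //       @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef,
    //                                         <4 x float> %v, i64 0)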
    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
        }
      }
    }
    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for bitcasts between VLAT <-> VLST where
    //       the element types of the vectors are not the same, until we figure
    //       out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         isa<llvm::ScalableVectorType>(DstTy)) ||
        (isa<llvm::ScalableVectorType>(SrcTy) &&
         isa<llvm::FixedVectorType>(DstTy))) {
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));
  case CK_NoOp: {
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
                                              : Visit(const_cast<Expr*>(E));
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     CGF.ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CodeGenFunction::CFITCK_DerivedCast,
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),
                                     CE->getType()->getPointeeType());
  }
  case CK_Dynamic: {
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),
                                     CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here.  This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));
  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }

  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }

  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr *>(E));
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }
  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   SrcElTy->isSignedIntegerOrEnumerationType(),
                                   "conv");
    }
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
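  // Note: TreatBooleanAsSigned makes the i1 value sign-extend, so under
  // CK_BooleanToSignedIntegral 'true' converts to -1 (all bits set) rather
  // than 1, the form expected of boolean vector results in OpenCL.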
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert(DestTy->isVectorType() && "Expected dest type to be vector type");
    Value *Vec = Visit(const_cast<Expr *>(E));
    SmallVector<int, 16> Mask;
    unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
    for (unsigned I = 0; I != NumElts; ++I)
      Mask.push_back(I);

    return Builder.CreateShuffleVector(Vec, Mask, "trunc");
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
                                           !E->getType()->isVoidType());
  if (!RetAlloca.isValid())
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}
Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CodeGenFunction::RunCleanupsScope Scope(CGF);
  Value *V = Visit(E->getSubExpr());
  // Defend against dominance problems caused by jumps out of expression
  // evaluation through the shared cleanup block.
  Scope.ForceCleanup({&V});
  return V;
}
//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}
*ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2747 const UnaryOperator
*E
, llvm::Value
*InVal
, bool IsInc
) {
2748 llvm::Value
*Amount
=
2749 llvm::ConstantInt::get(InVal
->getType(), IsInc
? 1 : -1, true);
2750 StringRef Name
= IsInc
? "inc" : "dec";
2751 switch (CGF
.getLangOpts().getSignedOverflowBehavior()) {
2752 case LangOptions::SOB_Defined
:
2753 if (!CGF
.SanOpts
.has(SanitizerKind::SignedIntegerOverflow
))
2754 return Builder
.CreateAdd(InVal
, Amount
, Name
);
2756 case LangOptions::SOB_Undefined
:
2757 if (!CGF
.SanOpts
.has(SanitizerKind::SignedIntegerOverflow
))
2758 return Builder
.CreateNSWAdd(InVal
, Amount
, Name
);
2760 case LangOptions::SOB_Trapping
:
2761 if (!E
->canOverflow())
2762 return Builder
.CreateNSWAdd(InVal
, Amount
, Name
);
2763 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2764 E
, InVal
, IsInc
, E
->getFPFeaturesInEffect(CGF
.getLangOpts())));
2766 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
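// For example, the three modes map '++i' on a plain 'int' to roughly:
//   -fwrapv:             %inc = add i32 %i, 1
//   default (undefined): %inc = add nsw i32 %i, 1
//   -ftrapv:             a checked add via llvm.sadd.with.overflow plus a trap
// and -fsanitize=signed-integer-overflow forces the checked form in the first
// two modes as well.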
namespace {
/// Handles check and update for lastprivate conditional variables.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
  const UnaryOperator *E;

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;
  llvm::Value *Previous = nullptr;
  QualType SrcType = E->getType();

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;
  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions.  We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    // Special case for atomic increment/decrement on floats.
    // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
    if (type->isFloatingType()) {
      llvm::Type *Ty = ConvertType(type);
      if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
        llvm::AtomicRMWInst::BinOp aop =
            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
        llvm::Instruction::BinaryOps op =
            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
        llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
        llvm::AtomicRMWInst *old = Builder.CreateAtomicRMW(
            aop, LV.getAddress(), amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        return isPre ? Builder.CreateBinOp(op, old, amt) : old;
      }
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }
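  // For example, '++x' for an '_Atomic int' x takes the atomicrmw fast path
  // above and emits roughly
  //   %old = atomicrmw add ptr %x, i32 1 seq_cst
  // (pre-increment then returns '%old + 1'); operations that don't qualify
  // fall into the cmpxchg retry loop built around 'atomicPHI'.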
  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
    if (CGF.getContext().isPromotableIntegerType(type)) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we
      // can not catch lossy "demotion". Because we still want to catch these
      // cases when the sanitizer is enabled, we perform the promotion, then
      // perform the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }
  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
      else
        value =
            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
                                       /*SignedIndices=*/false, isSubtraction,
                                       E->getExprLoc(), "incdec.funcptr");

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "incdec.ptr");
    }
  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
      // Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else if (value->getType()->isBFloatTy())
        FS = &CGF.getTarget().getBFloat16Format();
      else if (value->getType()->isPPC_FP128Ty())
        FS = &CGF.getTarget().getIbm128Format();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }
  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
    value = EmitFixedPointBinOp(Info);
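    // Illustrative note: for a signed type such as `_Fract f; f++;`, the code
    // above rewrites the operation as `f - (-1)`, since +1 may not be
    // representable in the type but -1 always is; EmitFixedPointBinOp then
    // handles any scaling and saturation.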
  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();

    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
        llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());

    if (CGF.getLangOpts().isSignedOverflowDefined())
      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(
          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
          E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }
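  // Illustrative sketch: the block above compiles an atomic inc/dec into a
  // classic compare-exchange retry loop, roughly
  //   atomic_op:  %old = phi [ %initial, %entry ], [ %cur, %atomic_op ]
  //               %new = add %old, 1
  //               %cur, %ok = cmpxchg ptr %lv, %old, %new
  //               br %ok, label %atomic_cont, label %atomic_op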
  // Store the updated result through the lvalue.
  if (LV.isBitField()) {
    Value *Src = Previous ? Previous : value;
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
                                    LV.getBitFieldInfo(), E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}

Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitPlus(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
                                    QualType PromotionType) {
  // This differs from gcc, though, most likely due to a bug in gcc.
  TestAndClearIgnoreResultAssign();
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
  return Visit(E->getSubExpr());
}

Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
                                          QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitMinus(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
                                     QualType PromotionType) {
  TestAndClearIgnoreResultAssign();
  Value *Op;
  if (!PromotionType.isNull())
    Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
  else
    Op = Visit(E->getSubExpr());

  // Generate a unary FNeg for FP ops.
  if (Op->getType()->isFPOrFPVectorTy())
    return Builder.CreateFNeg(Op, "fneg");

  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Op;
  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  BinOp.E = E;
  return EmitSub(BinOp);
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "not");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  if (E->getType()->isVectorType() &&
      E->getType()->castAs<VectorType>()->getVectorKind() ==
          VectorKind::Generic) {
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    } else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type *ResultType = ConvertType(E->getType());
  llvm::Value *Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value *Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value *ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRT = CurrentType->castAs<RecordType>();
      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
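// Illustrative note: for something like
//   __builtin_offsetof(struct S, a[i].b)
// the loop above accumulates `i * sizeof(a[0])` for the array component and
// the constant byte offset of `b` for the field component into Result.
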
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
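// Illustrative note: for `int n; ... sizeof(int[n])` the VLA branch above
// yields a runtime value, roughly `mul nuw i64 %n, 4`, instead of a constant.
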
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitReal(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        if (result.first)
          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
        return result.first;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitImag(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        if (result.second)
          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
        return result.second;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
                                            QualType PromotionType) {
  return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
}

Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
                                              QualType ExprType) {
  return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
}

Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
  E = E->IgnoreParens();
  if (auto BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
#define HANDLE_BINOP(OP)                                                       \
  case BO_##OP:                                                                \
    return Emit##OP(EmitBinOps(BO, PromotionType));
      HANDLE_BINOP(Add)
      HANDLE_BINOP(Sub)
      HANDLE_BINOP(Mul)
      HANDLE_BINOP(Div)
#undef HANDLE_BINOP
    default:
      break;
    }
  } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    case UO_Imag:
      return VisitImag(UO, PromotionType);
    case UO_Real:
      return VisitReal(UO, PromotionType);
    case UO_Minus:
      return VisitMinus(UO, PromotionType);
    case UO_Plus:
      return VisitPlus(UO, PromotionType);
    default:
      break;
    }
  }
  auto result = Visit(const_cast<Expr *>(E));
  if (result) {
    if (!PromotionType.isNull())
      return EmitPromotedValue(result, PromotionType);
    else
      return EmitUnPromotedValue(result, E->getType());
  }
  return result;
}

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
                                        QualType PromotionType) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
  Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
  if (!PromotionType.isNull())
    Result.Ty = PromotionType;
  else
    Result.Ty = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  Result.E = E;
  return Result;
}

LValue ScalarExprEmitter::EmitCompoundAssignLValue(
                                              const CompoundAssignOperator *E,
                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
                                                   Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
        // We don't have atomicrmw operands for *, %, /, <<, >>
        case BO_MulAssign: case BO_DivAssign:
        case BO_RemAssign:
        case BO_ShlAssign:
        case BO_ShrAssign:
          break;
        case BO_AddAssign:
          AtomicOp = llvm::AtomicRMWInst::Add;
          Op = llvm::Instruction::Add;
          break;
        case BO_SubAssign:
          AtomicOp = llvm::AtomicRMWInst::Sub;
          Op = llvm::Instruction::Sub;
          break;
        case BO_AndAssign:
          AtomicOp = llvm::AtomicRMWInst::And;
          Op = llvm::Instruction::And;
          break;
        case BO_XorAssign:
          AtomicOp = llvm::AtomicRMWInst::Xor;
          Op = llvm::Instruction::Xor;
          break;
        case BO_OrAssign:
          AtomicOp = llvm::AtomicRMWInst::Or;
          Op = llvm::Instruction::Or;
          break;
        default:
          llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);

        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getAddress(), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
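    // Illustrative sketch: when the fast path above applies, e.g.
    // `_Atomic int a; a += 5;`, the emitted IR is roughly
    //   %old = atomicrmw add ptr %a, i32 5 seq_cst
    //   %res = add i32 %old, 5
    // and no compare-exchange loop is needed.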
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  } else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}

Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS = nullptr;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}

void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
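// Illustrative note: the overflow arm above guards the one signed case that
// traps at the hardware level on many targets, INT_MIN / -1, by checking
// `LHS != INT_MIN || RHS != -1` before the division is emitted.
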
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    CGF.SetDivFPAccuracy(Val);
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);
  return phi;
}
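// Illustrative note: with an overflow handler configured, the handler is
// invoked with both operands sign-extended to i64 plus an opcode/width
// encoding, schematically `i64 handler(i64 lhs, i64 rhs, i8 op, i8 width)`;
// its truncated return value replaces the result on the overflow path.
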
/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here.  Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm.  This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}
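// Illustrative note: the null-pointer idiom tolerated above means code like
// `(char *)0 + (uintptr_t)x` compiles to a plain inttoptr rather than a GEP
// on a null base, so the result can still be round-tripped back to an
// integer.
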
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value *buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}

// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value *tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub = false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  return nullptr;
}
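// Illustrative note: under -ffp-contract=on, `a * b + c` is emitted as a
// single `call float @llvm.fmuladd.f32(float %a, float %b, float %c)`, and
// the fneg peeking above lets `c - a * b` reuse the same intrinsic.
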
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
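// Illustrative note: for fixed-point operands with different scales, the
// FixedPointBuilder calls above widen both sides to common semantics, perform
// the arithmetic there (saturating when either type is _Sat), and the final
// CreateFixedToFixed rescales the result to the result type.
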
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(Ty->getBitWidth()))
    return llvm::ConstantInt::get(RHSTy, RHSMax);
  return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}
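// Illustrative note: for an i32 LHS shifted by an i8 RHS, width(LHS)-1 = 31
// still fits in i8, so 31 is returned; for an i256 LHS and a signed i8 RHS
// the cap is instead i8's maximum of 127, avoiding a truncated constant.
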
Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}

4440 Value
*ScalarExprEmitter::EmitShl(const BinOpInfo
&Ops
) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
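// Example (illustrative, not from the original file): for
//   int f(int x, int s) { return x << s; }
// with a 32-bit int, -fsanitize=shift-exponent guards `s <= 31` above, and
// -fsanitize=shift-base additionally requires that no non-zero bits are
// shifted off the top of `x` (with one extra bit of slack for the sign bit
// under the C++ and unsigned-base rules).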
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}
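// Example (illustrative, not from the original file): under OpenCL (or HLSL)
// semantics, ConstrainShiftValue masks the shift amount by the LHS bit width,
// so `x >> 33` on a 32-bit `x` lowers to `x >> (33 & 31)`, i.e. `x >> 1`,
// and no sanitizer branch is emitted on that path.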
enum IntrinsicType { VCMPEQ, VCMPGT };
// return corresponding comparison intrinsic for given vector type
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}
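// Usage sketch (illustrative, not from the original file): a comparison of
// AltiVec vectors that yields a scalar, e.g.
//   vector unsigned int a, b; ... a == b
// reaches EmitCompare below with vector operands but a non-vector result
// type; GetIntrinsic(VCMPEQ, BuiltinType::UInt) then selects
// ppc_altivec_vcmpequw_p, whose CR6-derived predicate becomes the scalar
// result.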
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result.
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // Constants for mapping CR6 register bits to predicate result.
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // In several cases the vector arguments' order will be reversed.
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch (E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of the intrinsic may not be the same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing; in that case a ResultTy wider than i1 would crash later,
      // so truncate it here.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the
    // appropriate vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
*CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
4782 const BinaryOperator
*E
, Value
**Previous
, QualType
*SrcType
) {
4783 // In case we have the integer or bitfield sanitizer checks enabled
4784 // we want to get the expression before scalar conversion.
4785 if (auto *ICE
= dyn_cast
<ImplicitCastExpr
>(E
->getRHS())) {
4786 CastKind Kind
= ICE
->getCastKind();
4787 if (Kind
== CK_IntegralCast
|| Kind
== CK_LValueToRValue
) {
4788 *SrcType
= ICE
->getSubExpr()->getType();
4789 *Previous
= EmitScalarExpr(ICE
->getSubExpr());
4790 // Pass default ScalarConversionOpts to avoid emitting
4791 // integer sanitizer checks as E refers to bitfield.
4792 return EmitScalarConversion(*Previous
, *SrcType
, ICE
->getType(),
4796 return EmitScalarExpr(E
->getRHS());
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield; if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
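// Example (illustrative, not from the original file): for
//   struct S { int f : 3; } s; int r = (s.f = 9);
// the bit-field store truncates the value, and EmitStoreThroughBitfieldLValue
// updates RHS so `r` receives the value of `s.f` after the assignment, per
// C99 6.5.16p1; when the bitfield conversion sanitizer is enabled, the
// pre-conversion value (Previous) is used to diagnose the lossy store.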
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
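// CFG sketch (illustrative, not from the original file): for `a && b` with a
// non-constant LHS, the code above yields roughly
//   entry:    br i1 %a, label %land.rhs, label %land.end
//   land.rhs: %b = ... ; br label %land.end
//   land.end: %r = phi i1 [ false, %entry ], [ %b, %land.rhs ]
// with %r then zero-extended to the expression's result type.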
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
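// CFG sketch (illustrative, not from the original file): `a || b` mirrors the
// && lowering with the branch sense inverted: edges that reach "lor.end"
// directly contribute `true` to the phi, while "lor.rhs" supplies the
// evaluated RHS. A constant-false LHS reduces the whole expression to
// evaluating `b`; a constant-true LHS folds it to 1 when the RHS contains no
// labels.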
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}
//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}
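// Example (illustrative, not from the original file): in
//   int r = c ? 4 : 5;
// both arms are integer constants, so VisitAbstractConditionalOperator below
// skips the branch-and-phi diamond and emits a single
//   select i1 %c, i32 4, i32 5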
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        } else
          CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    if (llvm::EnableSingleByteCoverage) {
      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
      CGF.incrementProfileCounter(E);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(lhsExpr);
  else
    CGF.incrementProfileCounter(E);

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);

  return PN;
}
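// Example (illustrative, not from the original file): for an OpenCL vector
// condition such as `int4 c; float4 a, b; ... c ? a : b`, the path above
// tests the sign bit of each lane of `c`, sign-extends the per-lane mask,
// and blends `a` and `b` with AND/OR, bitcasting floating-point lanes to
// integers as needed.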
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}
// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
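// Example (illustrative, not from the original file): converting an `int *`
// to a `double` of pointer size is case 3b above: the pointer is first
// converted with ptrtoint to the target's intptr_t and then bitcast to
// double, because LLVM has no direct pointer-to-floating-point cast.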
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);
    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);

    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}
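// Example (illustrative, not from the original file): the OpenCL expression
// `as_int4(f)` with `float4 f` is a plain bitcast (same size, non-pointer),
// whereas conversions involving a vec3 operand take the ConvertVec3AndVec4
// shuffle path above, because a vec3 has the storage size of a vec4.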
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}
//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//

/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
llvm::Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                       \
  case BO_##Op##Assign:                                                       \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,   \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_Cmp:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};

/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}
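// Worked example (illustrative, not from the original file): for
//   struct S { int a; double b; }; // 16 bytes on a typical 64-bit target
// the inbounds GEP behind `&p[i].b` is folded by the loop above into
//   TotalOffset = i * 16 + 8
// computed via smul.with.overflow / sadd.with.overflow, ORing each step's
// overflow bit into OffsetOverflows.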
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
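// Example (illustrative, not from the original file): for `char *p` and a
// signed `n`, `p + n` compiled with -fsanitize=pointer-overflow takes the
// SignedIndices path above: when `n >= 0` the computed address must be
// unsigned-greater-or-equal to the base, when `n < 0` unsigned-less, and in
// both cases the byte-offset computation must not have overflowed intptr_t.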
Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);

  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}