//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GEPNoWrapFlags.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
using namespace clang;
using namespace CodeGen;
using llvm::Value;
//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
namespace {

/// Determine whether the given binary operation may overflow.
/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
/// the returned overflow check is precise. The returned value is 'true' for
/// all other opcodes, to be conservative.
bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
                            BinaryOperator::Opcode Opcode, bool Signed,
                            llvm::APInt &Result) {
  // Assume overflow is possible, unless we can prove otherwise.
  bool Overflow = true;
  const auto &LHSAP = LHS->getValue();
  const auto &RHSAP = RHS->getValue();
  if (Opcode == BO_Add) {
    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
                    : LHSAP.uadd_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Sub) {
    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
                    : LHSAP.usub_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Mul) {
    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
                    : LHSAP.umul_ov(RHSAP, Overflow);
  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
    if (Signed && !RHS->isZero())
      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
  }
  return Overflow;
}
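
// Worked example (illustrative, not part of the original source): for 32-bit
// signed operands LHS = INT_MAX and RHS = 1 with BO_Add, APInt::sadd_ov
// returns the wrapped value INT_MIN and sets Overflow, so the caller knows
// the operation can overflow and the check must be kept.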

struct BinOpInfo {
  Value *LHS;
  Value *RHS;
  QualType Ty;  // Computation Type.
  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
  FPOptions FPFeatures;
  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
    if (!LHSCI || !RHSCI)
      return true;

    llvm::APInt Result;
    return ::mayHaveIntegerOverflow(
        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
  }

  /// Check if the binop computes a division or a remainder.
  bool isDivremOp() const {
    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
           Opcode == BO_RemAssign;
  }

  /// Check if the binop can result in an integer division by zero.
  bool mayHaveIntegerDivisionByZero() const {
    if (isDivremOp())
      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
        return CI->isZero();
    return true;
  }

  /// Check if the binop can result in a float division by zero.
  bool mayHaveFloatDivisionByZero() const {
    if (isDivremOp())
      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
        return CFP->isZero();
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases, this
  /// operation did not follow usual arithmetic conversion and both operands
  /// might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType LHSType = BinOp->getLHS()->getType();
      QualType RHSType = BinOp->getRHS()->getType();
      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
    }
    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
      return UnOp->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
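
  // Illustrative example (not in the original source): in '_Accum a; a < 1'
  // the comparison's result type is plain 'int', yet the LHS operand is a
  // fixed point type, so isFixedPointOp() must inspect the operands and
  // still returns true.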

  /// Check if the RHS has a signed integer representation.
  bool rhsHasSignedIntegerRepresentation() const {
    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
      QualType RHSType = BinOp->getRHS()->getType();
      return RHSType->hasSignedIntegerRepresentation();
    }
    return false;
  }
};

static bool MustVisitNullValue(const Expr *E) {
  // If a null pointer expression's type is the C++0x nullptr_t, then
  // it's not necessarily a simple constant and it must be evaluated
  // for its potential side effects.
  return E->getType()->isNullPtrType();
}
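
// Illustrative example (not in the original source): given a function
// 'std::nullptr_t f();', the initializer in 'int *p = f();' always yields a
// null pointer, but the call to f() must still be emitted for its side
// effects before the null constant is produced.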

/// If \p E is a widened promoted integer, get its base (unpromoted) type.
static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
                                                       const Expr *E) {
  const Expr *Base = E->IgnoreImpCasts();
  if (E == Base)
    return std::nullopt;

  QualType BaseTy = Base->getType();
  if (!Ctx.isPromotableIntegerType(BaseTy) ||
      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
    return std::nullopt;

  return BaseTy;
}

/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
  return getUnwidenedIntegerType(Ctx, E).has_value();
}

/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  if (UO && UO->getOpcode() == UO_Minus &&
      Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::NegUnsignedConst) &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
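
// Worked example (illustrative, not in the original source): for
//   unsigned short a, b;   // 16-bit, promoted to 32-bit signed int
//   int prod = a * b;
// PromotedSize is 32 and 2 * 16 == 32 is not < 32, so the check is kept;
// e.g. 65535 * 65535 == 4294836225 overflows a signed 32-bit int. With
// 'unsigned char' operands instead, 2 * 8 < 32 holds and the check is elided.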

class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  bool IgnoreResultAssign;
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
                      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }

  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }
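
  // Illustrative example (not in the original source): given
  //   typedef double *aligned_double __attribute__((align_value(64)));
  // a load of an 'aligned_double' value lets us attach a 64-byte alignment
  // assumption to the loaded pointer.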

  /// EmitLoadOfLValue - Given an expression with complex type that represents a
  /// value l-value, this method emits the address of the l-value, then loads
  /// and returns the result.
  Value *EmitLoadOfLValue(const Expr *E) {
    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
                                E->getExprLoc());

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);

  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
  Value *EmitFloatToBoolConversion(Value *V) {
    // Compare against 0.0 for fp scalars.
    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
    return Builder.CreateFCmpUNE(V, Zero, "tobool");
  }

  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);

    return Builder.CreateICmpNE(V, Zero, "tobool");
  }

  Value *EmitIntToBoolConversion(Value *V) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again. Optimize this common case.
    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
        Value *Result = ZI->getOperand(0);
        // If there aren't any more uses, zap the instruction to save space.
        // Note that there can be more uses, for example if this
        // is the result of an assignment.
        if (ZI->use_empty())
          ZI->eraseFromParent();
        return Result;
      }
    }

    return Builder.CreateIsNotNull(V, "tobool");
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }

  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue())
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
                    CGF.getContext().getTypeAlignInChars(E->getType())),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      return Result;
    }
    return Visit(E->getSubExpr());
  }

  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
  }

  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        E->getMethodDecl()->getReturnType()->isReferenceType())
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }

  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }

  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);

  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    APValue Evaluated =
        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }

  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }

  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero, bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));

  QualType getPromotionType(QualType Ty) {
    const auto &Ctx = CGF.getContext();
    if (auto *CT = Ty->getAs<ComplexType>()) {
      QualType ElementType = CT->getElementType();
      if (ElementType.UseExcessPrecision(Ctx))
        return Ctx.getComplexType(Ctx.FloatTy);
    }

    if (Ty.UseExcessPrecision(Ctx)) {
      if (auto *VT = Ty->getAs<VectorType>()) {
        unsigned NumElements = VT->getNumElements();
        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
      }
      return Ctx.FloatTy;
    }

    return QualType();
  }

  // Binary operators and binary compound assignment operators.
#define HANDLEBINOP(OP)                                                        \
  Value *VisitBin##OP(const BinaryOperator *E) {                               \
    QualType promotionTy = getPromotionType(E->getType());                     \
    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = EmitUnPromotedValue(result, E->getType());                      \
    return result;                                                             \
  }                                                                            \
  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
  }
  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(And)
  HANDLEBINOP(Xor)
  HANDLEBINOP(Or)
#undef HANDLEBINOP
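
  // Note (illustrative, not in the original source): each HANDLEBINOP(OP)
  // expansion defines both VisitBin##OP and VisitBin##OP##Assign, so e.g.
  // HANDLEBINOP(Mul) routes both '*' and '*=' through EmitMul, with an
  // optional excess-precision promotion around the operation.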

  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
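
  // Note (illustrative, not in the original source): the floating-point
  // predicates are ordered (FCMP_O*) except for NE, which is unordered
  // (FCMP_UNE); that way 'x == NaN' folds to false while 'x != NaN' is
  // true, matching IEEE 754 comparison semantics.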

  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
};
} // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");

  if (SrcType->isRealFloatingType())
    return EmitFloatToBoolConversion(Src);

  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  if (isa<llvm::IntegerType>(Src->getType()))
    return EmitIntToBoolConversion(Src);

  assert(isa<llvm::PointerType>(Src->getType()));
  return EmitPointerToBoolConversion(Src, SrcType);
}

void ScalarExprEmitter::EmitFloatConversionCheck(
    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
  assert(SrcType->isFloatingType() && "not a conversion from floating point");
  if (!isa<llvm::IntegerType>(DstTy))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  using llvm::APFloat;
  using llvm::APSInt;

  llvm::Value *Check = nullptr;
  const llvm::fltSemantics &SrcSema =
      CGF.getContext().getFloatTypeSemantics(OrigSrcType);

  // Floating-point to integer. This has undefined behavior if the source is
  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
  // towards zero).
  unsigned Width = CGF.getContext().getIntWidth(DstType);
  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();

  APSInt Min = APSInt::getMinValue(Width, Unsigned);
  APFloat MinSrc(SrcSema, APFloat::uninitialized);
  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for lower bound. Just check for
    // -Inf/NaN.
    MinSrc = APFloat::getInf(SrcSema, true);
  else
    // Find the largest value which is too small to represent (before
    // truncation toward zero).
    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);

  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
      APFloat::opOverflow)
    // Don't need an overflow check for upper bound. Just check for
    // +Inf/NaN.
    MaxSrc = APFloat::getInf(SrcSema, false);
  else
    // Find the smallest value which is too large to represent (before
    // truncation toward zero).
    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);

  // If we're converting from __half, convert the range to float to match
  // the type of src.
  if (OrigSrcType->isHalfType()) {
    const llvm::fltSemantics &Sema =
        CGF.getContext().getFloatTypeSemantics(SrcType);
    bool IsInexact;
    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
  }

  llvm::Value *GE =
      Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
  llvm::Value *LE =
      Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
  Check = Builder.CreateAnd(GE, LE);

  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
                                  CGF.EmitCheckTypeDescriptor(DstType)};
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
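
// Worked example (illustrative, not in the original source): for a
// 'float' -> 'signed char' conversion, Min = -128 and Max = 127, so the
// exclusive bounds become MinSrc = -129.0 and MaxSrc = 128.0 (values in
// between truncate toward zero into range), and the emitted check is
//   (Src > -129.0) & (Src < 128.0)
// which also rejects +-Inf and NaN, since ordered comparisons are false
// on NaN.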

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();
  (void)DstTy; // Only used in assert()

  // This should be truncation of integral types.
  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
  // Else, it is a signed truncation.
  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  SanitizerMask Mask;
  if (!SrcSigned && !DstSigned) {
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
  } else {
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
  }

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.
  return std::make_pair(Kind, std::make_pair(Check, Mask));
}
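
// Worked example (illustrative, not in the original source): truncating the
// i32 value 300 to i8 yields 44 (300 & 0xFF); extending 44 back to i32 and
// comparing against 300 gives 'i1 false', i.e. the truncation was lossy and
// the handler fires.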

static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
    QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}

void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check =
          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  // If the comparison result is 'i1 false', then the truncation was lossy.

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second))
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
                                             const char *Name,
                                             CGBuilderTy &Builder) {
  bool VSigned = VType->isSignedIntegerOrEnumerationType();
  llvm::Type *VTy = V->getType();
  if (!VSigned) {
    // If the value is unsigned, then it is never negative.
    return llvm::ConstantInt::getFalse(VTy->getContext());
  }
  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
                            llvm::Twine(Name) + "." + V->getName() +
                                ".negativitycheck");
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                 QualType DstType, CGBuilderTy &Builder) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
         "non-integer llvm type");

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  (void)SrcSigned; // Only used in assert()
  (void)DstSigned; // Only used in assert()
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();
  (void)SrcBits; // Only used in assert()
  (void)DstBits; // Only used in assert()

  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
         "either the widths should be different, or the signednesses.");

  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
}
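
// Worked example (illustrative, not in the original source): for
//   int x = -1; unsigned y = x;
// the source is negative but the destination (4294967295 as unsigned) is
// not, so the two negativity tests disagree, the equality compare yields
// 'i1 false', and the sign-change handler is invoked.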

void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc) {
  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // That's it. We can't rule out any more cases with the data we have.

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
                {Src, Dst});
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the truncation Src -> Dst was lossy.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
  if (!SrcSigned && !DstSigned)
    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
  else
    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;

  llvm::Value *Check = nullptr;
  // 1. Extend the truncated value back to the same width as the Src.
  Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
  // 2. Equality-compare with the original source value
  Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
  // If the comparison result is 'i1 false', then the truncation was lossy.

  return std::make_pair(
      Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

// Should be called within CodeGenFunction::SanitizerScope RAII scope.
// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
                 std::pair<llvm::Value *, SanitizerMask>>
EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, CGBuilderTy &Builder) {
  // 1. Was the old Value negative?
  llvm::Value *SrcIsNegative =
      EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
  // 2. Is the new Value negative?
  llvm::Value *DstIsNegative =
      EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
  // 3. Now, was the 'negativity status' preserved during the conversion?
  // NOTE: conversion from negative to zero is considered to change the sign.
  // (We want to get 'false' when the conversion changed the sign)
  // So we should just equality-compare the negativity statuses.
  llvm::Value *Check = nullptr;
  Check =
      Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
  // If the comparison result is 'false', then the conversion changed the sign.
  return std::make_pair(
      ScalarExprEmitter::ICCK_IntegerSignChange,
      std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
}

void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
                                                  Value *Dst, QualType DstType,
                                                  const CGBitFieldInfo &Info,
                                                  SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
                                                                       DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unnecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  CodeGenFunction::SanitizerScope SanScope(this);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerMask>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases
  //   1. If Src and Dst have the same signedness and size
  //   2. If both are unsigned, a sign check is unnecessary!
  //   3. If Dst is signed and bigger than Src, either
  //      sign-extension or zero-extension will make sure
  //      the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
            {Src, Dst});
}
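
// Illustrative example (not in the original source): given
//   struct S { int x : 3; };   // Info.Size == 3
//   s.x = 5;
// the 3-bit signed field stores -3 (bit pattern 0b101), so sign-extending
// the stored value back and comparing against 5 yields 'i1 false' and the
// truncation is reported.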

Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType,
                                               SourceLocation Loc,
                                               ScalarConversionOpts Opts) {
  // All conversions involving fixed point types should be handled by the
  // EmitFixedPoint family functions. This is done to prevent bloating up this
  // function more, and although fixed point numbers are represented by
  // integers, we do not want to follow any logic that assumes they should be
  // treated as integers.
  // TODO(leonardchan): When necessary, add another if statement checking for
  // conversions to fixed point types from other types.
  if (SrcType->isFixedPointType()) {
    if (DstType->isBooleanType())
      // It is important that we check this before checking if the dest type is
      // an integer because booleans are technically integer types.
      // We do not need to check the padding bit on unsigned types if unsigned
      // padding is enabled because overflow into this bit is undefined
      // behavior.
      return Builder.CreateIsNotNull(Src, "tobool");
    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
        DstType->isRealFloatingType())
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion from a fixed point type to another type.");
  } else if (DstType->isFixedPointType()) {
    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
      // This also includes converting booleans and enums to fixed point types.
      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);

    llvm_unreachable(
        "Unhandled scalar conversion to a fixed point type from another type.");
  }

  QualType NoncanonicalSrcType = SrcType;
  QualType NoncanonicalDstType = DstType;

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
    if (DstTy->isFloatingPointTy()) {
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy) {
    if (Opts.EmitImplicitIntegerSignChangeChecks)
      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
                                 NoncanonicalDstType, Loc);

    return Src;
  }

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Src;

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr-to-int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Sema should add casts to make sure that the source expression's type is
    // the same as the vector's element type (sans qualifiers)
    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
               SrcType.getTypePtr() &&
           "Splatted expr doesn't match with vector element type?");

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Src, "splat");
  }

  if (SrcType->isMatrixType() && DstType->isMatrixType())
    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
    // Allow bitcast from vector to integer/fp of the same size.
    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
    if (SrcSize == DstSize)
      return Builder.CreateBitCast(Src, DstTy, "conv");

    // Conversions between vectors of different sizes are not allowed except
    // when vectors of half are involved. Operations on storage-only half
    // vectors require promoting half vector operands to float vectors and
    // truncating the result, which is either an int or float vector, to a
    // short or half vector.

    // Source and destination are both expected to be vectors.
    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    (void)DstElementTy;

    assert(((SrcElementTy->isIntegerTy() &&
             DstElementTy->isIntegerTy()) ||
            (SrcElementTy->isFloatingPointTy() &&
             DstElementTy->isFloatingPointTy())) &&
           "unexpected conversion between a floating-point vector and an "
           "integer vector");

    // Truncate an i32 vector to an i16 vector.
    if (SrcElementTy->isIntegerTy())
      return Builder.CreateIntCast(Src, DstTy, false, "conv");

    // Truncate a float vector to a half vector.
    if (SrcSize > DstSize)
      return Builder.CreateFPTrunc(Src, DstTy, "conv");

    // Promote a half vector to a float vector.
    return Builder.CreateFPExt(Src, DstTy, "conv");
  }

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type. However, we consider the
  // range of representable values for all floating-point types to be
  // [-inf,+inf], so no overflow can ever happen when the destination type is a
  // floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      OrigSrcType->isFloatingType())
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
                             Loc);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);

  if (DstTy != ResTy) {
    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  if (Opts.EmitImplicitIntegerTruncationChecks)
    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  if (Opts.EmitImplicitIntegerSignChangeChecks)
    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
                               NoncanonicalDstType, Loc);

  return Res;
}
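
// For example, under -fsanitize=implicit-integer-truncation the implicit
// conversion in 'char c = i;' (with 'int i') reaches this function with
// Opts.EmitImplicitIntegerTruncationChecks set: the trunc is emitted as
// usual, and EmitIntegerTruncationCheck() then compares a widened copy of
// the result against the original operand to detect lost bits at runtime.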

Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(
        Src, CGF.getContext().getFixedPointSemantics(DstTy));
  else if (DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(
        Src, CGF.getContext().getFixedPointSemantics(SrcTy),
        ConvertType(DstTy));
  else {
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}
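
// Sketch of the semantics used above: a fixed-point value is an integer
// scaled by 2^-fbits, so converting e.g. an _Accum to 'int' is (roughly) an
// arithmetic shift right by fbits, and 'int' to _Accum a shift left into the
// integral bits, with saturation applied when the target semantics are
// saturating.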

/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    //  Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
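
// For example, '(bool)z' for '_Complex double z' lowers to
//   (z.real != 0.0) | (z.imag != 0.0)
// while '(double)z' just converts the real part and drops the imaginary
// part, as C99 6.3.1.7p2 requires.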

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (e.g. INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
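
// For example, 'a + b' under -fsanitize=signed-integer-overflow reaches here
// with Check == SanitizerHandler::AddOverflow; EmitCheck() then branches to
// the corresponding __ubsan_handle_add_overflow* runtime entry, passing the
// source location and type descriptor (static) plus both operands (dynamic).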

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return nullptr;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  unsigned AddrSpace =
      Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  llvm::Type *ExprTy = ConvertType(E->getType());
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}

Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
  assert(E->getDataElementCount() == 1);
  auto It = E->begin();
  return Builder.getInt((*It)->getValue());
}

Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value* NewV = llvm::PoisonValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnes())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}

Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();

  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }

  return Result;
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      !E->getBase()->getType()->isSveVLSBuiltinType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
  Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());

  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
  unsigned NumRows = MatrixTy->getNumRows();
  llvm::MatrixBuilder MB(Builder);
  Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
    MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());

  Value *Matrix = Visit(E->getBase());

  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
  return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
}

static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                      unsigned Off) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return -1;
  return Off + MV;
}

static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
  assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
         "Index operand too large for shufflevector mask!");
  return C->getZExtValue();
}

Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
      dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType() == E->getType())
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  //        Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
        CGF.EmitVTablePtrCheckForCast(
            PT->getPointeeType(),
            Address(Src,
                    CGF.ConvertTypeForMem(
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              ScalableDstTy->getElementCount().getKnownMinValue() / 8);
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
          if (Result->getType() != DstTy)
            Result = Builder.CreateBitCast(Result, DstTy);
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
          return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
        }
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for bitcasts between VLAT <-> VLST where
    //       the element types of the vectors are not the same, until we figure
    //       out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         isa<llvm::ScalableVectorType>(DstTy)) ||
        (isa<llvm::ScalableVectorType>(SrcTy) &&
         isa<llvm::FixedVectorType>(DstTy))) {
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
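
  // Example for the fixed/scalable interop above: bitcasting a fixed-length
  // vector of i32 (e.g. a value of an arm_sve_vector_bits type) to
  // 'svint32_t' with matching element type emits llvm.vector.insert; only
  // mismatched element types still take the temporary-alloca round-trip.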

  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr *>(E));

  case CK_NoOp:
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
                                              : Visit(const_cast<Expr *>(E));

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     CGF.ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CodeGenFunction::CFITCK_DerivedCast,
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),
                                     CE->getType()->getPointeeType());
  }

  case CK_Dynamic: {
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),
                                     CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr *>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr *>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carry it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
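
  // Example: with -fstrict-vtable-pointers, '(intptr_t)p' where '*p' may be
  // a dynamic class emits llvm.strip.invariant.group before the ptrtoint, so
  // the resulting integer does not carry invariant.group dynamic-type
  // information.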

  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr *>(E));
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   SrcElTy->isSignedIntegerOrEnumerationType(),
                                   "conv");
    }
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
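
  // Note the asymmetry in CK_IntegralCast above: sanitizer-driven
  // ScalarConversionOpts are applied only to implicit casts (e.g. the one in
  // 'short s = i;'); an explicit '(short)i' is considered intentional and is
  // not instrumented.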

  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }

  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
           "Destination type must be a vector or builtin type.");
    Value *Vec = Visit(const_cast<Expr *>(E));
    if (auto *VecTy = DestTy->getAs<VectorType>()) {
      SmallVector<int> Mask;
      unsigned NumElts = VecTy->getNumElements();
      for (unsigned I = 0; I != NumElts; ++I)
        Mask.push_back(I);

      return Builder.CreateShuffleVector(Vec, Mask, "trunc");
    }
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
                                           !E->getType()->isVoidType());
  if (!RetAlloca.isValid())
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}
*ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups
*E
) {
2743 CodeGenFunction::RunCleanupsScope
Scope(CGF
);
2744 Value
*V
= Visit(E
->getSubExpr());
2745 // Defend against dominance problems caused by jumps out of expression
2746 // evaluation through the shared cleanup block.
2747 Scope
.ForceCleanup({&V
});

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}

llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Undefined:
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Trapping:
    if (!E->canOverflow())
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
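
// For 'int i; ++i;' this picks, per signed-overflow mode:
//   SOB_Defined   (-fwrapv): add i32 %i, 1      (wrapping add)
//   SOB_Undefined (default): add nsw i32 %i, 1  (overflow is UB)
//   SOB_Trapping  (-ftrapv): overflow-checked add via EmitOverflowCheckedBinOp
// with the first two falling through to the checked form when
// -fsanitize=signed-integer-overflow is enabled.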

/// For the purposes of overflow pattern exclusion, does this match the
/// "while (i--)" pattern?
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
                                   bool isPre, ASTContext &Ctx) {
  if (isInc || isPre)
    return false;

  // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
  if (!Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
    return false;

  // all Parents (usually just one) must be a WhileStmt
  for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
    if (!Parent.get<WhileStmt>())
      return false;

  return true;
}
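
// For example, with
// -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while, the
// idiom 'while (n--) { ... }' on an unsigned 'n' is not instrumented by
// -fsanitize=unsigned-integer-overflow even though 'n' wraps when the loop
// terminates.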

namespace {
/// Handles check and update for lastprivate conditional variables.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
  const UnaryOperator *E;

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace

llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;
  llvm::Value *Previous = nullptr;
  QualType SrcType = E->getType();

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
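
    // For example, '++x' on '_Atomic int x' becomes a single
    // 'atomicrmw add ptr %x, i32 1 seq_cst', returning old+1 for the
    // pre-increment form and the fetched old value for post-increment,
    // instead of the generic cmpxchg loop built further below.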
    // Special case for atomic increment/decrement on floats.
    // Bail out on non-power-of-2-sized floating point types (e.g., x86_fp80).
    if (type->isFloatingType()) {
      llvm::Type *Ty = ConvertType(type);
      if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
        llvm::AtomicRMWInst::BinOp aop =
            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
        llvm::Instruction::BinaryOps op =
            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
        llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
        llvm::AtomicRMWInst *old =
            CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);

        return isPre ? Builder.CreateBinOp(op, old, amt) : old;
      }
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;

    bool excludeOverflowPattern =
        matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());

    if (CGF.getContext().isPromotableIntegerType(type)) {
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(type) !=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we
      // can not catch lossy "demotion". Because we still want to catch these
      // cases when the sanitizer is enabled, we perform the promotion, then
      // perform the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
               !excludeOverflowPattern) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }
2997 // Next most common: pointer increment.
2998 } else if (const PointerType
*ptr
= type
->getAs
<PointerType
>()) {
2999 QualType type
= ptr
->getPointeeType();
3001 // VLA types don't have constant size.
3002 if (const VariableArrayType
*vla
3003 = CGF
.getContext().getAsVariableArrayType(type
)) {
3004 llvm::Value
*numElts
= CGF
.getVLASize(vla
).NumElts
;
3005 if (!isInc
) numElts
= Builder
.CreateNSWNeg(numElts
, "vla.negsize");
3006 llvm::Type
*elemTy
= CGF
.ConvertTypeForMem(vla
->getElementType());
3007 if (CGF
.getLangOpts().isSignedOverflowDefined())
3008 value
= Builder
.CreateGEP(elemTy
, value
, numElts
, "vla.inc");
3010 value
= CGF
.EmitCheckedInBoundsGEP(
3011 elemTy
, value
, numElts
, /*SignedIndices=*/false, isSubtraction
,
3012 E
->getExprLoc(), "vla.inc");
3014 // Arithmetic on function pointers (!) is just +-1.
3015 } else if (type
->isFunctionType()) {
3016 llvm::Value
*amt
= Builder
.getInt32(amount
);
3018 if (CGF
.getLangOpts().isSignedOverflowDefined())
3019 value
= Builder
.CreateGEP(CGF
.Int8Ty
, value
, amt
, "incdec.funcptr");
3022 CGF
.EmitCheckedInBoundsGEP(CGF
.Int8Ty
, value
, amt
,
3023 /*SignedIndices=*/false, isSubtraction
,
3024 E
->getExprLoc(), "incdec.funcptr");
3026 // For everything else, we can just do a simple increment.
3028 llvm::Value
*amt
= Builder
.getInt32(amount
);
3029 llvm::Type
*elemTy
= CGF
.ConvertTypeForMem(type
);
3030 if (CGF
.getLangOpts().isSignedOverflowDefined())
3031 value
= Builder
.CreateGEP(elemTy
, value
, amt
, "incdec.ptr");
3033 value
= CGF
.EmitCheckedInBoundsGEP(
3034 elemTy
, value
, amt
, /*SignedIndices=*/false, isSubtraction
,
3035 E
->getExprLoc(), "incdec.ptr");
3038 // Vector increment/decrement.
3039 } else if (type
->isVectorType()) {
3040 if (type
->hasIntegerRepresentation()) {
3041 llvm::Value
*amt
= llvm::ConstantInt::get(value
->getType(), amount
);
3043 value
= Builder
.CreateAdd(value
, amt
, isInc
? "inc" : "dec");
3045 value
= Builder
.CreateFAdd(
3047 llvm::ConstantFP::get(value
->getType(), amount
),
3048 isInc
? "inc" : "dec");
3052 } else if (type
->isRealFloatingType()) {
3053 // Add the inc/dec to the real part.
3055 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(CGF
, E
);
3057 if (type
->isHalfType() && !CGF
.getContext().getLangOpts().NativeHalfType
) {
3058 // Another special case: half FP increment should be done via float
3059 if (CGF
.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3060 value
= Builder
.CreateCall(
3061 CGF
.CGM
.getIntrinsic(llvm::Intrinsic::convert_from_fp16
,
3063 input
, "incdec.conv");
3065 value
= Builder
.CreateFPExt(input
, CGF
.CGM
.FloatTy
, "incdec.conv");
3069 if (value
->getType()->isFloatTy())
3070 amt
= llvm::ConstantFP::get(VMContext
,
3071 llvm::APFloat(static_cast<float>(amount
)));
3072 else if (value
->getType()->isDoubleTy())
3073 amt
= llvm::ConstantFP::get(VMContext
,
3074 llvm::APFloat(static_cast<double>(amount
)));
3076 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3077 // Convert from float.
3078 llvm::APFloat
F(static_cast<float>(amount
));
3080 const llvm::fltSemantics
*FS
;
3081 // Don't use getFloatTypeSemantics because Half isn't
3082 // necessarily represented using the "half" LLVM type.
3083 if (value
->getType()->isFP128Ty())
3084 FS
= &CGF
.getTarget().getFloat128Format();
3085 else if (value
->getType()->isHalfTy())
3086 FS
= &CGF
.getTarget().getHalfFormat();
3087 else if (value
->getType()->isBFloatTy())
3088 FS
= &CGF
.getTarget().getBFloat16Format();
3089 else if (value
->getType()->isPPC_FP128Ty())
3090 FS
= &CGF
.getTarget().getIbm128Format();
3092 FS
= &CGF
.getTarget().getLongDoubleFormat();
3093 F
.convert(*FS
, llvm::APFloat::rmTowardZero
, &ignored
);
3094 amt
= llvm::ConstantFP::get(VMContext
, F
);
3096 value
= Builder
.CreateFAdd(value
, amt
, isInc
? "inc" : "dec");
3098 if (type
->isHalfType() && !CGF
.getContext().getLangOpts().NativeHalfType
) {
3099 if (CGF
.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
3100 value
= Builder
.CreateCall(
3101 CGF
.CGM
.getIntrinsic(llvm::Intrinsic::convert_to_fp16
,
3103 value
, "incdec.conv");
3105 value
= Builder
.CreateFPTrunc(value
, input
->getType(), "incdec.conv");
3109 // Fixed-point types.
3110 } else if (type
->isFixedPointType()) {
3111 // Fixed-point types are tricky. In some cases, it isn't possible to
3112 // represent a 1 or a -1 in the type at all. Piggyback off of
3113 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3116 Info
.Ty
= E
->getType();
3117 Info
.Opcode
= isInc
? BO_Add
: BO_Sub
;
3119 Info
.RHS
= llvm::ConstantInt::get(value
->getType(), 1, false);
3120 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3121 // since -1 is guaranteed to be representable.
3122 if (type
->isSignedFixedPointType()) {
3123 Info
.Opcode
= isInc
? BO_Sub
: BO_Add
;
3124 Info
.RHS
= Builder
.CreateNeg(Info
.RHS
);
3126 // Now, convert from our invented integer literal to the type of the unary
3127 // op. This will upscale and saturate if necessary. This value can become
3128 // undef in some cases.
3129 llvm::FixedPointBuilder
<CGBuilderTy
> FPBuilder(Builder
);
3130 auto DstSema
= CGF
.getContext().getFixedPointSemantics(Info
.Ty
);
3131 Info
.RHS
= FPBuilder
.CreateIntegerToFixed(Info
.RHS
, true, DstSema
);
3132 value
= EmitFixedPointBinOp(Info
);
3134 // Objective-C pointer types.
3136 const ObjCObjectPointerType
*OPT
= type
->castAs
<ObjCObjectPointerType
>();
3138 CharUnits size
= CGF
.getContext().getTypeSizeInChars(OPT
->getObjectType());
3139 if (!isInc
) size
= -size
;
3140 llvm::Value
*sizeValue
=
3141 llvm::ConstantInt::get(CGF
.SizeTy
, size
.getQuantity());
3143 if (CGF
.getLangOpts().isSignedOverflowDefined())
3144 value
= Builder
.CreateGEP(CGF
.Int8Ty
, value
, sizeValue
, "incdec.objptr");
3146 value
= CGF
.EmitCheckedInBoundsGEP(
3147 CGF
.Int8Ty
, value
, sizeValue
, /*SignedIndices=*/false, isSubtraction
,
3148 E
->getExprLoc(), "incdec.objptr");
3149 value
= Builder
.CreateBitCast(value
, input
->getType());
3153 llvm::BasicBlock
*curBlock
= Builder
.GetInsertBlock();
3154 llvm::BasicBlock
*contBB
= CGF
.createBasicBlock("atomic_cont", CGF
.CurFn
);
3155 auto Pair
= CGF
.EmitAtomicCompareExchange(
3156 LV
, RValue::get(atomicPHI
), RValue::get(value
), E
->getExprLoc());
3157 llvm::Value
*old
= CGF
.EmitToMemory(Pair
.first
.getScalarVal(), type
);
3158 llvm::Value
*success
= Pair
.second
;
3159 atomicPHI
->addIncoming(old
, curBlock
);
3160 Builder
.CreateCondBr(success
, contBB
, atomicPHI
->getParent());
3161 Builder
.SetInsertPoint(contBB
);
3162 return isPre
? value
: input
;
3165 // Store the updated result through the lvalue.
3166 if (LV
.isBitField()) {
3167 Value
*Src
= Previous
? Previous
: value
;
3168 CGF
.EmitStoreThroughBitfieldLValue(RValue::get(value
), LV
, &value
);
3169 CGF
.EmitBitfieldConversionCheck(Src
, SrcType
, value
, E
->getType(),
3170 LV
.getBitFieldInfo(), E
->getExprLoc());
3172 CGF
.EmitStoreThroughLValue(RValue::get(value
), LV
);
3174 // If this is a postinc, return the value read from memory, otherwise use the
3176 return isPre
? value
: input
;
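
// Illustrative note on the inc/dec lowering above (a sketch only; exact IR
// depends on target and options). Assuming defaults on a 64-bit target:
//   int i; i++;    =>  %0 = load i32, ptr %i
//                      %inc = add nsw i32 %0, 1
//                      store i32 %inc, ptr %i        ; post-inc yields %0
//   int *p; ++p;   =>  %1 = load ptr, ptr %p
//                      %incdec.ptr = getelementptr inbounds i32, ptr %1, i64 1
//                      store ptr %incdec.ptr, ptr %p
//   _Bool b; b++;  =>  store i8 1, ptr %b            ; increment is always true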
Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitPlus(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
                                    QualType PromotionType) {
  // This differs from gcc, though, most likely due to a bug in gcc.
  TestAndClearIgnoreResultAssign();
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
  return Visit(E->getSubExpr());
}
Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
                                          QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitMinus(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
                                     QualType PromotionType) {
  TestAndClearIgnoreResultAssign();
  Value *Op;
  if (!PromotionType.isNull())
    Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
  else
    Op = Visit(E->getSubExpr());

  // Generate a unary FNeg for FP ops.
  if (Op->getType()->isFPOrFPVectorTy())
    return Builder.CreateFNeg(Op, "fneg");

  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Op;
  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  BinOp.E = E;
  return EmitSub(BinOp);
}
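
// Illustrative note on VisitMinus above: negation reuses the subtraction
// path so every overflow mode applies uniformly. Roughly, under defaults:
//   -x (int)    =>  sub nsw i32 0, %x     ; via EmitSub(0 - x)
//   -x (double) =>  fneg double %x        ; direct unary FNeg
// With -ftrapv or -fsanitize=signed-integer-overflow the integer case goes
// through EmitOverflowCheckedBinOp instead, since -INT_MIN overflows.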
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "not");
}
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  if (E->getType()->isVectorType() &&
      E->getType()->castAs<VectorType>()->getVectorKind() ==
          VectorKind::Generic) {
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    } else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type *ResultType = ConvertType(E->getType());
  llvm::Value *Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value *Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value *ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRT = CurrentType->castAs<RecordType>();
      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
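
// Illustrative note on VisitOffsetOfExpr above: only offsetof expressions
// with non-constant array indices reach the loop; everything else folds via
// EvaluateAsInt. For a hypothetical `__builtin_offsetof(struct S, a[i].f)`
// the emitted IR is roughly:
//   %conv = sext i32 %i to i64
//   %mul  = mul i64 %conv, <sizeof(a[0])>
//   %add  = add i64 %mul, <constant field offsets>
// with the constant components folded together by the builder.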
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
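
// Illustrative note on the trait lowering above: a VLA is the only case
// where sizeof is not a compile-time constant. For `int vla[n]; sizeof(vla)`
// the emitted IR is roughly:
//   %size = mul nuw i64 4, %vla.numelts   ; element size * saved VLA count
// All other sizeof/alignof/__datasizeof queries fold to a plain integer
// constant through EvaluateKnownConstInt.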
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitReal(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        if (result.first)
          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
        return result.first;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
                                         QualType PromotionType) {
  QualType promotionTy = PromotionType.isNull()
                             ? getPromotionType(E->getSubExpr()->getType())
                             : PromotionType;
  Value *result = VisitImag(E, promotionTy);
  if (result && !promotionTy.isNull())
    result = EmitUnPromotedValue(result, E->getType());
  return result;
}

Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        if (result.second)
          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
        return result.second;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
                                            QualType PromotionType) {
  return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
}

Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
                                              QualType ExprType) {
  return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType),
                                   "unpromotion");
}
Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
  E = E->IgnoreParens();
  if (auto BO = dyn_cast<BinaryOperator>(E)) {
    switch (BO->getOpcode()) {
#define HANDLE_BINOP(OP)                                                       \
  case BO_##OP:                                                                \
    return Emit##OP(EmitBinOps(BO, PromotionType));
      HANDLE_BINOP(Add)
      HANDLE_BINOP(Sub)
      HANDLE_BINOP(Mul)
      HANDLE_BINOP(Div)
#undef HANDLE_BINOP
    default:
      break;
    }
  } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    case UO_Imag:
      return VisitImag(UO, PromotionType);
    case UO_Real:
      return VisitReal(UO, PromotionType);
    case UO_Minus:
      return VisitMinus(UO, PromotionType);
    case UO_Plus:
      return VisitPlus(UO, PromotionType);
    default:
      break;
    }
  }
  auto result = Visit(const_cast<Expr *>(E));
  if (result) {
    if (!PromotionType.isNull())
      return EmitPromotedValue(result, PromotionType);
    else
      return EmitUnPromotedValue(result, E->getType());
  }
  return result;
}
BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
                                        QualType PromotionType) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
  Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
  if (!PromotionType.isNull())
    Result.Ty = PromotionType;
  else
    Result.Ty = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  Result.E = E;
  return Result;
}
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
                                              const CompoundAssignOperator *E,
                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
                                                   Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
        // We don't have atomicrmw operands for *, %, /, <<, >>
        case BO_MulAssign: case BO_DivAssign:
        case BO_RemAssign:
        case BO_ShlAssign:
        case BO_ShrAssign:
          break;
        case BO_AddAssign:
          AtomicOp = llvm::AtomicRMWInst::Add;
          Op = llvm::Instruction::Add;
          break;
        case BO_SubAssign:
          AtomicOp = llvm::AtomicRMWInst::Sub;
          Op = llvm::Instruction::Sub;
          break;
        case BO_AndAssign:
          AtomicOp = llvm::AtomicRMWInst::And;
          Op = llvm::Instruction::And;
          break;
        case BO_XorAssign:
          AtomicOp = llvm::AtomicRMWInst::Xor;
          Op = llvm::Instruction::Xor;
          break;
        case BO_OrAssign:
          AtomicOp = llvm::AtomicRMWInst::Or;
          Op = llvm::Instruction::Or;
          break;
        default:
          llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);

        llvm::AtomicRMWInst *OldVal =
            CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);

  if (CGF.getLangOpts().OpenMP)
    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
                                                                  E->getLHS());
  return LHSLV;
}
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  Value *RHS = nullptr;
  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the
  // assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
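
// Illustrative note on EmitCompoundAssign above: the r-value produced is the
// computed result rather than a reload, except for volatile lvalues in C++:
//   int r = (x += y);                  // r is the value stored into x
//   volatile int v; int s = (v += 1);  // C++: s is re-read from v
// In C the computed value is always returned directly.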
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
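
// Illustrative note on the check above: for signed `a / b` with
// -fsanitize=integer-divide-by-zero,signed-integer-overflow it emits roughly:
//   %nonzero  = icmp ne i32 %b, 0
//   %not.min  = icmp ne i32 %a, -2147483648
//   %not.neg1 = icmp ne i32 %b, -1
//   %no.ovf   = or i1 %not.min, %not.neg1   ; INT_MIN / -1 would overflow
// and the collected conditions guard a single branch to the UBSan handler.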
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
                     Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    CGF.SetDivFPAccuracy(Val);
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
      Ops.Ty->isIntegerType() &&
      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
      &CGF.getLangOpts().OverflowHandler;
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
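
// Illustrative note on EmitOverflowCheckedBinOp above: for signed `a + b`
// under -ftrapv with no handler configured, the emitted IR is roughly:
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
// With -ftrapv-handler=NAME the overflow path instead calls NAME with both
// operands widened to i64 plus the opcode and bit width, and the truncated
// handler result rejoins the normal path through a phi.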
/// Emit pointer + index arithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF,
                                    const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here.  Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm.  This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
                                                       op.Opcode,
                                                       expr->getLHS(),
                                                       expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  const PointerType *pointerType
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    QualType objectType = pointerOperand->getType()
                                        ->castAs<ObjCObjectPointerType>()
                                        ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    if (CGF.getLangOpts().isSignedOverflowDefined()) {
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  if (CGF.getLangOpts().isSignedOverflowDefined())
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}
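
// Illustrative note on emitPointerArithmetic above: for `int *p; p + n` with
// a signed 32-bit `n` on a 64-bit target, the common path emits roughly:
//   %idx.ext = sext i32 %n to i64
//   %add.ptr = getelementptr inbounds i32, ptr %p, i64 %idx.ext
// Under -fwrapv the GEP is emitted without `inbounds`, and with
// -fsanitize=array-bounds an explicit bounds check precedes the GEP.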
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
// Addend. Use negMul and negAdd to negate the first operand of the Mul or
// the add operand respectively. This allows fmuladd to represent a*b-c, or
// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
// efficient operations.
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  MulOp->eraseFromParent();

  return FMulAdd;
}
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS,
                          false);
    }
  }

  return nullptr;
}
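
// Illustrative note on tryEmitFMulAdd above: with -ffp-contract=on (the
// default for C) a single-statement multiply-add becomes the fused intrinsic:
//   double r = a * b + c;
//     =>  %r = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
// The forms a*b-c and c-a*b are handled by fneg'ing the addend or the first
// multiplicand before the call, as buildFMulAdd does.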
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
/// The resulting value must be calculated with exact precision, so the
/// operands may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types
    // are zero'd out. They could be overwritten through non-saturating
    // operations that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
                 BinaryOperator::isShiftAssignOp(op.Opcode);
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
*ScalarExprEmitter::EmitSub(const BinOpInfo
&op
) {
4346 // The LHS is always a pointer if either side is.
4347 if (!op
.LHS
->getType()->isPointerTy()) {
4348 if (op
.Ty
->isSignedIntegerOrEnumerationType()) {
4349 switch (CGF
.getLangOpts().getSignedOverflowBehavior()) {
4350 case LangOptions::SOB_Defined
:
4351 if (!CGF
.SanOpts
.has(SanitizerKind::SignedIntegerOverflow
))
4352 return Builder
.CreateSub(op
.LHS
, op
.RHS
, "sub");
4354 case LangOptions::SOB_Undefined
:
4355 if (!CGF
.SanOpts
.has(SanitizerKind::SignedIntegerOverflow
))
4356 return Builder
.CreateNSWSub(op
.LHS
, op
.RHS
, "sub");
4358 case LangOptions::SOB_Trapping
:
4359 if (CanElideOverflowCheck(CGF
.getContext(), op
))
4360 return Builder
.CreateNSWSub(op
.LHS
, op
.RHS
, "sub");
4361 return EmitOverflowCheckedBinOp(op
);
4365 // For vector and matrix subs, try to fold into a fmuladd.
4366 if (op
.LHS
->getType()->isFPOrFPVectorTy()) {
4367 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(CGF
, op
.FPFeatures
);
4368 // Try to form an fmuladd.
4369 if (Value
*FMulAdd
= tryEmitFMulAdd(op
, CGF
, Builder
, true))
4373 if (op
.Ty
->isConstantMatrixType()) {
4374 llvm::MatrixBuilder
MB(Builder
);
4375 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(CGF
, op
.FPFeatures
);
4376 return MB
.CreateSub(op
.LHS
, op
.RHS
);
4379 if (op
.Ty
->isUnsignedIntegerType() &&
4380 CGF
.SanOpts
.has(SanitizerKind::UnsignedIntegerOverflow
) &&
4381 !CanElideOverflowCheck(CGF
.getContext(), op
))
4382 return EmitOverflowCheckedBinOp(op
);
4384 if (op
.LHS
->getType()->isFPOrFPVectorTy()) {
4385 CodeGenFunction::CGFPOptionsRAII
FPOptsRAII(CGF
, op
.FPFeatures
);
4386 return Builder
.CreateFSub(op
.LHS
, op
.RHS
, "sub");
4389 if (op
.isFixedPointOp())
4390 return EmitFixedPointBinOp(op
);
4392 return Builder
.CreateSub(op
.LHS
, op
.RHS
, "sub");
4395 // If the RHS is not a pointer, then we have normal pointer
4397 if (!op
.RHS
->getType()->isPointerTy())
4398 return emitPointerArithmetic(CGF
, op
, CodeGenFunction::IsSubtraction
);
4400 // Otherwise, this is a pointer subtraction.
4402 // Do the raw subtraction part.
4404 = Builder
.CreatePtrToInt(op
.LHS
, CGF
.PtrDiffTy
, "sub.ptr.lhs.cast");
4406 = Builder
.CreatePtrToInt(op
.RHS
, CGF
.PtrDiffTy
, "sub.ptr.rhs.cast");
4407 Value
*diffInChars
= Builder
.CreateSub(LHS
, RHS
, "sub.ptr.sub");
4409 // Okay, figure out the element size.
4410 const BinaryOperator
*expr
= cast
<BinaryOperator
>(op
.E
);
4411 QualType elementType
= expr
->getLHS()->getType()->getPointeeType();
4413 llvm::Value
*divisor
= nullptr;
4415 // For a variable-length array, this is going to be non-constant.
4416 if (const VariableArrayType
*vla
4417 = CGF
.getContext().getAsVariableArrayType(elementType
)) {
4418 auto VlaSize
= CGF
.getVLASize(vla
);
4419 elementType
= VlaSize
.Type
;
4420 divisor
= VlaSize
.NumElts
;
4422 // Scale the number of non-VLA elements by the non-VLA element size.
4423 CharUnits eltSize
= CGF
.getContext().getTypeSizeInChars(elementType
);
4424 if (!eltSize
.isOne())
4425 divisor
= CGF
.Builder
.CreateNUWMul(CGF
.CGM
.getSize(eltSize
), divisor
);
4427 // For everything elese, we can just compute it, safe in the
4428 // assumption that Sema won't let anything through that we can't
4429 // safely compute the size of.
4431 CharUnits elementSize
;
4432 // Handle GCC extension for pointer arithmetic on void* and
4433 // function pointer types.
4434 if (elementType
->isVoidType() || elementType
->isFunctionType())
4435 elementSize
= CharUnits::One();
4437 elementSize
= CGF
.getContext().getTypeSizeInChars(elementType
);
4439 // Don't even emit the divide for element size of 1.
4440 if (elementSize
.isOne())
4443 divisor
= CGF
.CGM
.getSize(elementSize
);
4446 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4447 // pointer difference in C is only defined in the case where both operands
4448 // are pointing to elements of an array.
4449 return Builder
.CreateExactSDiv(diffInChars
, divisor
, "sub.ptr.div");
Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
                                                bool RHSIsSigned) {
  llvm::IntegerType *Ty;
  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());
  // For a given type of LHS the maximum shift amount is width(LHS)-1, however
  // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
  // this in ConstantInt::get, this results in the value getting truncated.
  // Constrain the return value to be max(RHS) in this case.
  llvm::Type *RHSTy = RHS->getType();
  llvm::APInt RHSMax =
      RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
                  : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
  if (RHSMax.ult(Ty->getBitWidth()))
    return llvm::ConstantInt::get(RHSTy, RHSMax);
  return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
}
Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
                                              const Twine &Name) {
  llvm::IntegerType *Ty;
  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
    Ty = cast<llvm::IntegerType>(VT->getElementType());
  else
    Ty = cast<llvm::IntegerType>(LHS->getType());

  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
    return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);

  return Builder.CreateURem(
      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
}
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().isSignedOverflowDefined() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS,
          Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                            /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
                                        : SanitizerKind::UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    CodeGenFunction::SanitizerScope SanScope(&CGF);
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *Valid = Builder.CreateICmpULE(
        Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
  }

  if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}
enum IntrinsicType { VCMPEQ, VCMPGT };
// return corresponding comparison intrinsic for given vector type
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
                                        BuiltinType::Kind ElemKind) {
  switch (ElemKind) {
  default: llvm_unreachable("unexpected element type");
  case BuiltinType::Char_U:
  case BuiltinType::UChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
  case BuiltinType::Char_S:
  case BuiltinType::SChar:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
  case BuiltinType::UShort:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
  case BuiltinType::Short:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
  case BuiltinType::UInt:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
  case BuiltinType::Int:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
  case BuiltinType::ULong:
  case BuiltinType::ULongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
  case BuiltinType::Long:
  case BuiltinType::LongLong:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
  case BuiltinType::Float:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
  case BuiltinType::Double:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
  case BuiltinType::UInt128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
  case BuiltinType::Int128:
    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
  }
}
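// Illustrative example: an equality compare of two vector unsigned int
// values selects llvm.ppc.altivec.vcmpequw.p, and a signed greater-than
// selects llvm.ppc.altivec.vcmpgtsw.p; the "_p" (predicate) variants return
// a scalar result derived from the CR6 condition register field.
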
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ = 0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        } else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
          !isa<llvm::ConstantPointerNull>(LHS) &&
          !isa<llvm::ConstantPointerNull>(RHS)) {

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS, RHS;
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
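// Illustrative sketch (approximate IR): `(a + bi) == (c + di)` with double
// components is emitted as two element-wise compares joined with `and`:
//   %cmp.r  = fcmp oeq double %a, %c
//   %cmp.i  = fcmp oeq double %b, %d
//   %and.ri = and i1 %cmp.r, %cmp.i
// and `!=` instead joins the element-wise inequalities with `or`.
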
llvm::Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  return EmitScalarExpr(E->getRHS());
}
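// Illustrative example: for `struct S { int i : 3; } s; s.i = x;` with the
// bitfield sanitizer enabled, *Previous captures the value of `x` before the
// narrowing conversion, so the caller can compare it against what was
// actually stored in the 3-bit field and report an implicit truncation.
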
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  switch (E->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

  case Qualifiers::OCL_Autoreleasing:
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

  case Qualifiers::OCL_ExplicitNone:
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

  case Qualifiers::OCL_Weak:
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

  case Qualifiers::OCL_None:
    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
    break;
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
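// Illustrative example: in C, `a = b` yields the assigned r-value, so RHS is
// returned directly; in C++, `volatile int v; int x = (v = 1);` must re-read
// `v`, which is why the volatile-qualified case above reloads the lvalue
// instead of reusing RHS.
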
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // Artificial location to preserve the scope information
  {
    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {
    CGF.incrementProfileCounter(E);

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.
      CGF.incrementProfileCounter(E);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeResetMCDCCondBitmap(E);

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.incrementProfileCounter(E->getRHS());
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())
        CGF.maybeUpdateMCDCTestVectorBitmap(E);

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(E);

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getCurrentProfileCount() -
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  CGF.incrementProfileCounter(E);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.incrementProfileCounter(E->getRHS());
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(E);

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}
//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races that didn't
  // exist in the source-level program.
}
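// Illustrative example: in `c ? 4 : 5` both arms are constant-evaluatable,
// so the emitter below folds the conditional into a select; in `c ? x : y`
// the variable reads are not considered safe to hoist, so real control flow
// is emitted instead.
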
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        if (llvm::EnableSingleByteCoverage) {
          CGF.incrementProfileCounter(lhsExpr);
          CGF.incrementProfileCounter(rhsExpr);
        } else
          CGF.incrementProfileCounter(E);
      }
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {
    CGF.incrementProfileCounter(E);

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

    if (llvm::EnableSingleByteCoverage) {
      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
      CGF.incrementProfileCounter(E);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    // If the conditional has void type, make sure we return a null Value*.
    if (!LHS) {
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }

    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(lhsExpr);
  else
    CGF.incrementProfileCounter(E);

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    CGF.incrementProfileCounter(E);

  return PN;
}
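// Illustrative sketch (OpenCL): for `cond ? a : b` with int4 operands, the
// per-lane mask is mask[i] = (cond[i] < 0) ? -1 : 0, and the result is
// computed branchlessly as (a & mask) | (b & ~mask), matching the and/or
// sequence in the vector path above.
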
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  return Visit(E->getChosenSubExpr());
}

Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty);

  Address ArgValue = Address::invalid();
  RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  return ArgPtr.getScalarVal();
}

Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  return CGF.EmitBlockLiteral(block);
}
// Convert a vec3 to vec4, or vice versa.
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
                                 Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
                                           const llvm::DataLayout &DL,
                                           Value *Src, llvm::Type *DstTy,
                                           StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
  llvm::Type *DstTy = ConvertType(E->getType());

  llvm::Type *SrcTy = Src->getType();
  unsigned NumElementsSrc =
      isa<llvm::VectorType>(SrcTy)
          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
          : 0;
  unsigned NumElementsDst =
      isa<llvm::VectorType>(DstTy)
          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
          : 0;

  // Use bit vector expansion for ext_vector_type boolean vectors.
  if (E->getType()->isExtVectorBoolType())
    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");

  // Going from vec3 to non-vec3 is a special case and requires a shuffle
  // vector to get a vec4, then a bitcast if the target type is different.
  if (NumElementsSrc == 3 && NumElementsDst != 3) {
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       DstTy);
    Src->setName("astype");
    return Src;
  }

  // Going from non-vec3 to vec3 is a special case and requires a bitcast
  // to vec4 if the original type is not vec4, then a shuffle vector to
  // get a vec3.
  if (NumElementsSrc != 3 && NumElementsDst == 3) {
    auto *Vec4Ty = llvm::FixedVectorType::get(
        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                       Vec4Ty);
    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
    Src->setName("astype");
    return Src;
  }

  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
                                      Src, DstTy, "astype");
}
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  return CGF.EmitAtomicExpr(E).getScalarVal();
}

//===----------------------------------------------------------------------===//
//                         Entry Point into this File
//===----------------------------------------------------------------------===//
/// Emit the computation of the specified expression of scalar type, ignoring
/// the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
  assert(E && hasScalarEvaluationKind(E->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, IgnoreResultAssign)
      .Visit(const_cast<Expr *>(E));
}
/// Emit a conversion from the specified type to the specified destination
/// type, both of which are LLVM scalar types.
Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
                                             QualType DstTy,
                                             SourceLocation Loc) {
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
                                                      QualType SrcTy,
                                                      QualType DstTy,
                                                      SourceLocation Loc) {
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
llvm::Value *
CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}
LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    llvm::Type *BaseTy =
        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    Addr = EmitLValue(BaseExpr).getAddress();
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}
LValue CodeGenFunction::EmitCompoundAssignmentLValue(
                                            const CompoundAssignOperator *E) {
  ScalarExprEmitter Scalar(*this);
  Value *Result = nullptr;
  switch (E->getOpcode()) {
#define COMPOUND_OP(Op)                                                       \
  case BO_##Op##Assign:                                                       \
    return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op,   \
                                           Result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(And);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }

  llvm_unreachable("Unhandled compound assignment operator");
}
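// For instance, COMPOUND_OP(Add) expands to
//   case BO_AddAssign:
//     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
//                                            Result);
// so each compound assignment reuses the corresponding binary-op emitter.
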
struct GEPOffsetAndOverflow {
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};
/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from its base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}
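// Illustrative example: for `struct S { int a; double b; } *p` on a typical
// 64-bit target, the GEP for `&p[i].b` accumulates
//   TotalOffset = i * sizeof(S) + offsetof(S, b)   // i.e. i * 16 + 8
// via the checked mul/add above, so any intptr_t overflow is recorded in
// OffsetOverflows.
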
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
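// Illustrative example: under -fsanitize=pointer-overflow, `p + n` with
// `char *p` is rejected at runtime if (uintptr_t)p + n wraps; in C the check
// additionally requires both the base and the result to be non-null (so
// `(char *)NULL + 0` is flagged), while C++ treats nullptr + 0 as valid.
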
Address CodeGenFunction::EmitCheckedInBoundsGEP(
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
    llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
    if (!SignedIndices && !IsSubtraction)
      NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

    return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
  }
  return RawAddress(
      EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
                             IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}