//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/LowerAtomic.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool tryExpandAtomicStore(StoreInst *SI);
  void expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  Value *
  insertRMWLLSCLoop(IRBuilderBase &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilderBase &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};
// IRBuilder to be used for replacement atomic instructions.
struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
  // Preserves the DebugLoc from I, and preserves still valid metadata.
  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
      : IRBuilder(I->getContext(), DL) {
    SetInsertPoint(I);
    this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
  }
};

} // end anonymous namespace
char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
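
// Iterate over every atomic instruction in F and rewrite each one according
// to the expansion strategy the target's TargetLowering hooks select for it.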
bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  const auto *Subtarget = TM.getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;
  TLI = Subtarget->getTargetLowering();
  DL = &F.getParent()->getDataLayout();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather a
  // list of all atomic instructions before we start.
  for (Instruction &I : instructions(F))
    if (I.isAtomic() && !isa<FenceInst>(&I))
      AtomicInsts.push_back(&I);

  bool MadeChange = false;
  for (auto *I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (LI && TLI->shouldCastAtomicLoadInIR(LI) ==
                  TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    } else if (SI &&
               TLI->shouldCastAtomicStoreInIR(SI) ==
                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    } else if (RMWI &&
               TLI->shouldCastAtomicRMWIInIR(RMWI) ==
                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        I = CASI = convertCmpXchgToIntegerType(CASI);
        MadeChange = true;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getFailureOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getMergedOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    } else if (I->hasAtomicStore() &&
               TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (SI)
        FenceOrdering = SI->getOrdering();
      else if (RMWI)
        FenceOrdering = RMWI->getOrdering();
      else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI) !=
                           TargetLoweringBase::AtomicExpansionKind::LLSC)
        // LLSC is handled in expandAtomicCmpXchg().
        FenceOrdering = CASI->getSuccessOrdering();

      IRBuilder Builder(I);
      if (auto TrailingFence =
              TLI->emitTrailingFence(Builder, I, FenceOrdering)) {
        TrailingFence->moveAfter(I);
        MadeChange = true;
      }
    }

    if (LI)
      MadeChange |= tryExpandAtomicLoad(LI);
    else if (SI)
      MadeChange |= tryExpandAtomicStore(SI);
    else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      //   we try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI)
      MadeChange |= tryExpandAtomicCmpXchg(CASI);
  }
  return MadeChange;
}
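
// Surround the instruction with the leading/trailing fences the target emits
// for the given ordering; returns true if any fence was actually created.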
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
AtomicRMWInst *
AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Type *PT = PointerType::get(NewTy, RMWI->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, NewAddr, NewVal,
                              RMWI->getAlign(), RMWI->getOrdering());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
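
// Expand an atomic load according to the strategy requested by the target;
// returns true if the instruction was changed.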
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}
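
// Expand an atomic store according to the strategy requested by the target;
// returns true if the instruction was changed.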
bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
  }
}
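
// Lower an atomic load to a bare load-linked instruction; since no
// store-conditional will follow, the target is asked to balance it out.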
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
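
// Lower an atomic load to a cmpxchg against a dummy value, using the loaded
// member of the result pair as the load's value.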
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
void AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
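
// Default CreateCmpXchgInstFun used by the cmpxchg-loop expansions: emit a
// plain cmpxchg (bitcasting FP operands to integers first) and return the
// success flag and loaded value through the out-parameters.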
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  assert(!OrigTy->isPointerTy());
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
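
// Dispatch an atomicrmw to the expansion the target asked for: an LL/SC or
// cmpxchg loop, a masked/bit-test/cmp-arith intrinsic, plain lowering, or a
// target-specific expansion. Returns true if the instruction was changed.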
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
  switch (Kind) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AI->getOperationName(AI->getOperation()) << " operation at "
               << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    TLI->emitExpandAtomicRMW(AI);
    return true;
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace
/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  Type *WordPtrType = PMV.WordType->getPointerTo(PtrTy->getAddressSpace());
  IntegerType *IntTy = DL.getIntPtrType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
        "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the LSB are known 0.
    PMV.AlignedAddr = Addr;
    PtrLSB = ConstantInt::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  // Cast for typed pointers.
  PMV.AlignedAddr =
      Builder.CreateBitCast(PMV.AlignedAddr, WordPtrType, "AlignedAddr");

  return PMV;
}
static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}
static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}
/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap: {
    // Finally, other ops will operate on the full value, so truncate down to
    // the original size, and expand out again after doing the
    // operation. Bitcasts will be inserted for FP values.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (AI->getOperation() == AtomicRMWInst::Xchg ||
      AI->getOperation() == AtomicRMWInst::Add ||
      AI->getOperation() == AtomicRMWInst::Sub ||
      AI->getOperation() == AtomicRMWInst::Nand) {
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                          PMV.ShiftAmt, "ValOperand_Shifted");
  }

  auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
                                     PerformPartwordOp, createCmpXchgInstFun);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI =
      Builder.CreateAtomicRMW(Op, PMV.AlignedAddr, NewOperand,
                              PMV.AlignedAddrAlignment, AI->getOrdering());

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
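
// Expand a sub-word cmpxchg into a word-sized cmpxchg on the containing
// aligned word, with a retry loop for the strong case.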
bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %25, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  ReplacementIRBuilder Builder(CI, *DL);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it didn't, abort the cmpxchg, since the
  // masked-in part must've.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}
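
// Expand an atomic operation to an LL/SC retry loop and replace all uses of
// the original instruction with the loaded value.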
void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
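
// Lower a sub-word atomicrmw through the target's masked atomicrmw intrinsic.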
void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-extend.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
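
// Lower a sub-word cmpxchg through the target's masked cmpxchg intrinsic.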
void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}
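
// Emit a load-linked/store-conditional retry loop at the builder's current
// insertion point and return the loaded value.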
Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >=
             F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}
/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy, Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}
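
// Expand a cmpxchg into an explicit control-flow diamond of load-linked /
// store-conditional blocks, inserting leading/trailing fences where the
// target requests them.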
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we can
  // delay emitting this barrier until we know a store is actually going to be
  // attempted. The cost of this delay is that we need 2 copies of the block
  // emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %loaded = extract value from %loaded.exit
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has happened,
    // so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(PoisonValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
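
// Return true if this atomicrmw provably leaves memory unchanged (e.g.
// add/sub/or/xor with 0, or and with -1), so it can be treated as a load.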
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
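
// Emit a cmpxchg-based retry loop at the builder's current insertion point
// and return the value loaded by the final, successful iteration.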
Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}
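
// As a concrete (illustrative) instance: "atomicrmw nand ptr %p, i32 %v
// seq_cst" run through insertRMWCmpXchgLoop becomes a loop of load, compute
// "~(%loaded & %v)", cmpxchg, and retry on failure, with the PHI threading
// the previous attempt's loaded value back into the next iteration.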
bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  }
}
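
// For illustration: an i8 cmpxchg on a target whose minimum cmpxchg width is
// 32 bits takes the expandPartwordCmpXchg path above, which operates on the
// containing aligned word and masks/shifts the i8 lane in and out.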
// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, AI->getModule()->getDataLayout());
  Builder.setIsFPConstrained(
      AI->getFunction()->hasFnAttribute(Attribute::StrictFP));

  // FIXME: If FP exceptions are observable, we should force them off for the
  // loop for the FP atomics.
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}
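
// A minimal sketch of a CreateCmpXchgInstFun a caller might pass (assuming
// the callback simply emits a plain cmpxchg; targets may instead emit
// target-specific sequences). It mirrors the lambda used further down in
// expandAtomicRMWToLibcall:
//   [](IRBuilderBase &B, Value *Addr, Value *Loaded, Value *NewVal,
//      Align Alignment, AtomicOrdering Order, SyncScope::ID SSID,
//      Value *&Success, Value *&NewLoaded) {
//     auto *Pair = B.CreateAtomicCmpXchg(
//         Addr, Loaded, NewVal, Alignment, Order,
//         AtomicCmpXchgInst::getStrongestFailureOrdering(Order), SSID);
//     Success = B.CreateExtractValue(Pair, 1, "success");
//     NewLoaded = B.CreateExtractValue(Pair, 0, "newloaded");
//   };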
// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
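
// For illustration: Size=4 with alignment >= 4 permits the __atomic_*_4
// family; an 8-byte operation with only 4-byte alignment fails the check and
// falls back to the generic, size_t-prefixed libcalls; Size=16 is only
// accepted when the target's largest legal integer is at least 64 bits (the
// int128 heuristic above).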
void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}
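
// Illustrative result on the sized path (a 4-byte, sufficiently aligned load
// that has been routed to the libcall path): "%v = load atomic i32, ptr %p
// seq_cst" becomes a call to "i32 __atomic_load_4(ptr, int)", with the C ABI
// constant for seq_cst passed as the ordering argument.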
void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}
void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
}
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return ArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return ArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return ArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return ArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return ArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return ArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return ArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
    // No atomic libcalls are available for max/min/umax/umin.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}
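
// Illustrative lookup: GetRMWLibcall(AtomicRMWInst::Add) yields
// {UNKNOWN_LIBCALL, _1, _2, _4, _8, _16}, so index 3 (the 4-byte slot) is
// RTLIB::ATOMIC_FETCH_ADD_4; index 0 is UNKNOWN_LIBCALL because there is no
// generic "__atomic_fetch_add". Xchg is the only RMW family above with a
// generic entry (__atomic_exchange) at index 0.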
void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}
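
// End-to-end (illustrative): an "atomicrmw max" on a target with neither a
// max libcall nor inline support becomes the insertRMWCmpXchgLoop structure,
// with each iteration's cmpxchg immediately re-expanded into a
// __atomic_compare_exchange(_N) call by expandAtomicCASToLibcall.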
// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);
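
  // For reference (an informational note): toCABI yields the C ABI ordering
  // constants matching the __ATOMIC_* macros: relaxed=0, consume=1,
  // acquire=2, release=3, acq_rel=4, seq_cst=5, so e.g. a seq_cst operation
  // passes 5 as its ordering argument.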
  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall so give up.
    return false;
  }
  // Build up the function call. There's two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //  iN    __atomic_load_N(iN *ptr, int ordering)
  //  void  __atomic_store_N(iN *ptr, iN val, int ordering)
  //  iN    __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //  bool  __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                    int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations, the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //  void  __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //  void  __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //  void  __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                          int ordering)
  //  bool  __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                  void *desired, int success_order,
  //                                  int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.
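  //
  // Illustrative mapping for the generic path (assuming a 32-bit target with
  // no 16-byte inline atomics): a strong
  //   "cmpxchg ptr %p, i128 %expected, i128 %desired seq_cst acquire"
  // becomes roughly
  //   bool __atomic_compare_exchange(16, %p, &expected.slot, &desired.slot,
  //                                  /*success*/ 5, /*failure*/ 2)
  // with the 'expected' and 'desired' values spilled to the stack allocas
  // created below.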
  AllocaInst *AllocaCASExpected = nullptr;
  Value *AllocaCASExpected_i8 = nullptr;
  AllocaInst *AllocaValue = nullptr;
  Value *AllocaValue_i8 = nullptr;
  AllocaInst *AllocaResult = nullptr;
  Value *AllocaResult_i8 = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  // note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertable. For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
  Value *PtrVal =
      Builder.CreateBitCast(PointerOperand, Type::getInt8PtrTy(Ctx, PtrTypeAS));
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
  Args.push_back(PtrVal);

  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();

    AllocaCASExpected_i8 = Builder.CreateBitCast(
        AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected_i8);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      AllocaValue_i8 =
          Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
      Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue_i8);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
    AllocaResult_i8 =
        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
    Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
    Args.push_back(AllocaResult_i8);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);
  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}