//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//
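//
// A purely illustrative example (the real choice is made by target hooks):
// an atomic operation wider than the target supports, such as
//
//   %v = load atomic i128, ptr %p seq_cst, align 16
//
// becomes a call to the __atomic_load libcall, while a sub-word atomicrmw on
// an LL/SC target is instead expanded into a word-sized LL/SC loop.
//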
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/LowerAtomic.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
class AtomicExpand : public FunctionPass {
  const TargetLowering *TLI = nullptr;
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool tryExpandAtomicStore(StoreInst *SI);
  void expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  Value *
  insertRMWLLSCLoop(IRBuilderBase &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I, TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilderBase &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID,
      function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};
// IRBuilder to be used for replacement atomic instructions.
struct ReplacementIRBuilder : IRBuilder<InstSimplifyFolder> {
  // Preserves the DebugLoc from I, and preserves still valid metadata.
  explicit ReplacementIRBuilder(Instruction *I, const DataLayout &DL)
      : IRBuilder(I->getContext(), DL) {
    SetInsertPoint(I);
    this->CollectMetadataToCopy(I, {LLVMContext::MD_pcsections});
  }
};

} // end anonymous namespace
char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions", false,
                false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
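// For example, if a target reports getMaxAtomicSizeInBitsSupported() == 64,
// then a 16-byte cmpxchg (or an under-aligned 8-byte one) fails this check
// and is routed to the generic __atomic_* libcalls below instead of target
// lowering. (Illustrative numbers; the limit is entirely target-defined.)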
bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  const auto *Subtarget = TM.getSubtargetImpl(F);
  if (!Subtarget->enableAtomicExpand())
    return false;
  TLI = Subtarget->getTargetLowering();
  DL = &F.getParent()->getDataLayout();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (Instruction &I : instructions(F))
    if (I.isAtomic() && !isa<FenceInst>(&I))
      AtomicInsts.push_back(&I);

  bool MadeChange = false;
  for (auto *I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (LI && TLI->shouldCastAtomicLoadInIR(LI) ==
                  TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = LI = convertAtomicLoadToIntegerType(LI);
      MadeChange = true;
    } else if (SI &&
               TLI->shouldCastAtomicStoreInIR(SI) ==
                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = SI = convertAtomicStoreToIntegerType(SI);
      MadeChange = true;
    } else if (RMWI &&
               TLI->shouldCastAtomicRMWIInIR(RMWI) ==
                   TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
      I = RMWI = convertAtomicXchgToIntegerType(RMWI);
      MadeChange = true;
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        I = CASI = convertCmpXchgToIntegerType(CASI);
        MadeChange = true;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getFailureOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getMergedOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    } else if (I->hasAtomicStore() &&
               TLI->shouldInsertTrailingFenceForAtomicStore(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (SI)
        FenceOrdering = SI->getOrdering();
      else if (RMWI)
        FenceOrdering = RMWI->getOrdering();
      else if (CASI && TLI->shouldExpandAtomicCmpXchgInIR(CASI) !=
                           TargetLoweringBase::AtomicExpansionKind::LLSC)
        // LLSC is handled in expandAtomicCmpXchg().
        FenceOrdering = CASI->getSuccessOrdering();

      IRBuilder Builder(I);
      if (auto TrailingFence =
              TLI->emitTrailingFence(Builder, I, FenceOrdering)) {
        TrailingFence->moveAfter(I);
        MadeChange = true;
      }
    }

    if (LI)
      MadeChange |= tryExpandAtomicLoad(LI);
    else if (SI)
      MadeChange |= tryExpandAtomicStore(SI);
    else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.
      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI)
      MadeChange |= tryExpandAtomicCmpXchg(CASI);
  }
  return MadeChange;
}
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  ReplacementIRBuilder Builder(I, *DL);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}
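// Illustrative example only: on a target whose emitLeading/TrailingFence
// hooks emit plain IR fences, a seq_cst store ends up bracketed roughly as
//
//   fence seq_cst
//   store atomic i32 %v, ptr %p monotonic, align 4
//   fence seq_cst
//
// The exact instructions (if any) are chosen entirely by the target hooks.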
/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(LI, *DL);

  Value *Addr = LI->getPointerOperand();

  auto *NewLI = Builder.CreateLoad(NewTy, Addr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
AtomicRMWInst *
AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  ReplacementIRBuilder Builder(RMWI, *DL);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Value *NewVal = Val->getType()->isPointerTy()
                      ? Builder.CreatePtrToInt(Val, NewTy)
                      : Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, Addr, NewVal,
                              RMWI->getAlign(), RMWI->getOrdering());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = RMWI->getType()->isPointerTy()
                       ? Builder.CreateIntToPtr(NewRMWI, RMWI->getType())
                       : Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilderBase &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    LI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    expandAtomicStore(SI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    SI->setAtomic(AtomicOrdering::NotAtomic);
    return true;
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicStore");
}
bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  ReplacementIRBuilder Builder(LI, *DL);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  ReplacementIRBuilder Builder(SI, *DL);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();

  StoreInst *NewSI = Builder.CreateStore(NewVal, Addr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
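// For example, a float store such as
//
//   store atomic float %f, ptr %p seq_cst, align 4
//
// is rewritten here into
//
//   %f.int = bitcast float %f to i32
//   store atomic i32 %f.int, ptr %p seq_cst, align 4
//
// (%f.int is an illustrative name; the builder picks the actual names.)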
void AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  ReplacementIRBuilder Builder(SI, *DL);
  AtomicOrdering Ordering = SI->getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic);
  AtomicOrdering RMWOrdering = Ordering == AtomicOrdering::Unordered
                                   ? AtomicOrdering::Monotonic
                                   : Ordering;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), RMWOrdering);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  tryExpandAtomicRMW(AI);
}
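// Illustrative example: on a target where a plain 64-bit store is not
// single-copy atomic, a store such as
//
//   store atomic i64 %v, ptr %p release, align 8
//
// is rewritten (with its result unused) into
//
//   atomicrmw xchg ptr %p, i64 %v release
//
// which tryExpandAtomicRMW then lowers like any other atomicrmw.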
static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  assert(!OrigTy->isPointerTy());
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
  switch (Kind) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AI->getOperationName(AI->getOperation()) << " operation at "
               << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::BitTestIntrinsic: {
    TLI->emitBitTestAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpArithIntrinsic: {
    TLI->emitCmpArithAtomicRMWIntrinsic(AI);
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicRMWInst(AI);
  case TargetLoweringBase::AtomicExpansionKind::Expand:
    TLI->emitExpandAtomicRMW(AI);
    return true;
  }
  llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
}
namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Type *IntValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace
/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = PMV.IntValueType = ValueType;
  if (PMV.ValueType->isFloatingPointTy())
    PMV.IntValueType =
        Type::getIntNTy(Ctx, ValueType->getPrimitiveSizeInBits());

  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0, /*isSigned*/ true);
    return PMV;
  }

  PMV.AlignedAddrAlignment = Align(MinWordSize);

  assert(ValueSize < MinWordSize);

  PointerType *PtrTy = cast<PointerType>(Addr->getType());
  IntegerType *IntTy = DL.getIntPtrType(Ctx, PtrTy->getAddressSpace());
  Value *PtrLSB;

  if (AddrAlign < MinWordSize) {
    PMV.AlignedAddr = Builder.CreateIntrinsic(
        Intrinsic::ptrmask, {PtrTy, IntTy},
        {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
        "AlignedAddr");

    Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
    PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  } else {
    // If the alignment is high enough, the LSB are known 0.
    PMV.AlignedAddr = Addr;
    PtrLSB = ConstantInt::getNullValue(IntTy);
  }

  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.IntValueType, "extracted");
  return Builder.CreateBitCast(Trunc, PMV.ValueType);
}
static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Updated = Builder.CreateBitCast(Updated, PMV.IntValueType);

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}
/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap: {
    // Finally, other ops will operate on the full value, so truncate down to
    // the original size, and expand out again after doing the
    // operation. Bitcasts will be inserted for FP values.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = buildAtomicRMWValue(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted = nullptr;
  if (AI->getOperation() == AtomicRMWInst::Xchg ||
      AI->getOperation() == AtomicRMWInst::Add ||
      AI->getOperation() == AtomicRMWInst::Sub ||
      AI->getOperation() == AtomicRMWInst::Nand) {
    ValOperand_Shifted =
        Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                          PMV.ShiftAmt, "ValOperand_Shifted");
  }

  auto PerformPartwordOp = [&](IRBuilderBase &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder, SSID,
                                     PerformPartwordOp, createCmpXchgInstFun);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI = Builder.CreateAtomicRMW(
      Op, PMV.AlignedAddr, NewOperand, PMV.AlignedAddrAlignment,
      AI->getOrdering(), AI->getSyncScopeID());
  // TODO: Preserve metadata

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
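// Illustrative example (little-endian, 32-bit minimum cmpxchg width):
//
//   %old = atomicrmw or ptr %p, i8 %v monotonic
//
// becomes roughly
//
//   %wide = atomicrmw or ptr %p.aligned, i32 %v.shifted monotonic
//   %old  = trunc i32 (lshr i32 %wide, %ShiftAmt) to i8
//
// where %v.shifted is %v zero-extended and shifted into its byte of the word.
// For 'and', the operand is first OR'ed with Inv_Mask so the other bytes of
// the word are left unchanged. (Value names here are illustrative.)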
bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //        [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //        i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //        label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //        label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //    %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //    %FinalOldVal = trunc i32 %tmp1 to i8
  //    %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //    %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  ReplacementIRBuilder Builder(CI, *DL);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it didn't, abort the cmpxchg, since the
  // masked-in part must've.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}
void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  ReplacementIRBuilder Builder(I, *DL);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  ReplacementIRBuilder Builder(AI, *DL);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-extend.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  ReplacementIRBuilder Builder(CI, *DL);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}
Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >=
             F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}
/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  ReplacementIRBuilder Builder(CI, *DL);

  Value *Addr = CI->getPointerOperand();

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      Addr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = PoisonValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}
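// For instance (illustrative, 64-bit pointers):
//
//   %r = cmpxchg ptr %p, ptr %old, ptr %new seq_cst seq_cst
//
// becomes a cmpxchg on i64 operands produced with ptrtoint, and the returned
// old value is converted back with inttoptr before rebuilding the result pair.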
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going to
  // be attempted. The cost of this delay is that we need 2 copies of the block
  // emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %loaded = extract value from %loaded.exit
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  ReplacementIRBuilder Builder(CI, *DL);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess = TLI->emitStoreConditional(Builder, NewValueInsert,
                                                  PMV.AlignedAddr, MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic ||
      TLI->shouldInsertTrailingFenceForAtomicStore(CI))
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto *User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto *EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has happened,
    // so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(PoisonValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}
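// For example, "atomicrmw or ptr %p, i32 0 acquire" never changes memory, so
// (if the target's lowerIdempotentRMWIntoFencedLoad hook cooperates) it can
// be replaced by an appropriately fenced atomic load of %p.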
bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
Value *AtomicExpand::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  //     %init_loaded = load atomic iN* %addr
  //     br label %loop
  // loop:
  //     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
  //     %new = some_op iN %loaded, %incr
  //     %pair = cmpxchg iN* %addr, iN %loaded, iN %new
  //     %new_loaded = extractvalue { iN, i1 } %pair, 0
  //     %success = extractvalue { iN, i1 } %pair, 1
  //     br i1 %success, label %atomicrmw.end, label %loop
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we want a load. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
  Loaded->addIncoming(InitLoaded, BB);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *NewLoaded = nullptr;
  Value *Success = nullptr;

  CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
                MemOpOrder == AtomicOrdering::Unordered
                    ? AtomicOrdering::Monotonic
                    : MemOpOrder,
                SSID, Success, NewLoaded);
  assert(Success && NewLoaded);

  Loaded->addIncoming(NewLoaded, LoopBB);

  Builder.CreateCondBr(Success, ExitBB, LoopBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return NewLoaded;
}
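
// Note on the loop emitted above: cmpxchg has no 'unordered' form, so an
// unordered atomicrmw is lowered with a monotonic cmpxchg; all stronger
// orderings are passed through to CreateCmpXchg unchanged.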
bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
  unsigned ValueSize = getAtomicOpSize(CI);

  switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
  case TargetLoweringBase::AtomicExpansionKind::None:
    if (ValueSize < MinCASSize)
      return expandPartwordCmpXchg(CI);
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    return expandAtomicCmpXchg(CI);
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
    expandAtomicCmpXchgToMaskedIntrinsic(CI);
    return true;
  case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
    return lowerAtomicCmpXchgInst(CI);
  }
}
// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {
  ReplacementIRBuilder Builder(AI, AI->getModule()->getDataLayout());
  Builder.setIsFPConstrained(
      AI->getFunction()->hasFnAttribute(Attribute::StrictFP));

  // FIXME: If FP exceptions are observable, we should force them off for the
  // loop for the FP atomics.
  Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
      Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
      AI->getOrdering(), AI->getSyncScopeID(),
      [&](IRBuilderBase &Builder, Value *Loaded) {
        return buildAtomicRMWValue(AI->getOperation(), Builder, Loaded,
                                   AI->getValOperand());
      },
      CreateCmpXchg);

  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return true;
}
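
// A CreateCmpXchgInstFun callback typically just emits a cmpxchg with the
// given operands and orderings; the lambda passed from
// expandAtomicRMWToLibcall() below is one concrete example.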
// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't actually be defined.)
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {
  // TODO: "LargestSize" is an approximation for "largest type that
  // you can express in C". It seems to be the case that int128 is
  // supported on all 64-bit platforms, otherwise only up to 64-bit
  // integers are supported. If we get this wrong, then we'll try to
  // call a sized libcall that doesn't actually exist. There should
  // really be some more reliable way in LLVM of determining integer
  // sizes which are valid in the target's C ABI...
  unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
  return Alignment >= Size &&
         (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
         Size <= LargestSize;
}
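
// For illustration (the concrete sizes and alignments here are hypothetical,
// not from this file): a naturally aligned 8-byte atomic on a typical 64-bit
// target passes these checks and may use e.g. __atomic_fetch_add_8, while an
// 8-byte atomic only known to be 4-aligned fails 'Alignment >= Size' and must
// use the generic, pointer-based __atomic_* calls instead.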
void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_LOAD,   RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
      RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
      I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
}
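
// Sketch of the result (the exact constant comes from toCABI() and the call
// name from the target's libcall table): a seq_cst 'load atomic i32, ptr %p'
// with sufficient alignment becomes roughly
//   %1 = call i32 @__atomic_load_4(ptr %p, i32 5)
// where 5 is the usual C ABI encoding of memory_order_seq_cst.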
void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_STORE,   RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
      RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
      nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
}
void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
  static const RTLIB::Libcall Libcalls[6] = {
      RTLIB::ATOMIC_COMPARE_EXCHANGE,   RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
      RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
  unsigned Size = getAtomicOpSize(I);

  bool expanded = expandAtomicOpToLibcall(
      I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
      I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
      Libcalls);
  if (!expanded)
    report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
}
static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
  static const RTLIB::Libcall LibcallsXchg[6] = {
      RTLIB::ATOMIC_EXCHANGE,   RTLIB::ATOMIC_EXCHANGE_1,
      RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
      RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
  static const RTLIB::Libcall LibcallsAdd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_ADD_1,
      RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
      RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
  static const RTLIB::Libcall LibcallsSub[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_SUB_1,
      RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
      RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
  static const RTLIB::Libcall LibcallsAnd[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_AND_1,
      RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
      RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
  static const RTLIB::Libcall LibcallsOr[6] = {
      RTLIB::UNKNOWN_LIBCALL,   RTLIB::ATOMIC_FETCH_OR_1,
      RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
      RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
  static const RTLIB::Libcall LibcallsXor[6] = {
      RTLIB::UNKNOWN_LIBCALL,    RTLIB::ATOMIC_FETCH_XOR_1,
      RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
      RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
  static const RTLIB::Libcall LibcallsNand[6] = {
      RTLIB::UNKNOWN_LIBCALL,     RTLIB::ATOMIC_FETCH_NAND_1,
      RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
      RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};

  switch (Op) {
  case AtomicRMWInst::BAD_BINOP:
    llvm_unreachable("Should not have BAD_BINOP.");
  case AtomicRMWInst::Xchg:
    return ArrayRef(LibcallsXchg);
  case AtomicRMWInst::Add:
    return ArrayRef(LibcallsAdd);
  case AtomicRMWInst::Sub:
    return ArrayRef(LibcallsSub);
  case AtomicRMWInst::And:
    return ArrayRef(LibcallsAnd);
  case AtomicRMWInst::Or:
    return ArrayRef(LibcallsOr);
  case AtomicRMWInst::Xor:
    return ArrayRef(LibcallsXor);
  case AtomicRMWInst::Nand:
    return ArrayRef(LibcallsNand);
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
  case AtomicRMWInst::FMax:
  case AtomicRMWInst::FMin:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
    // No atomic libcalls are available for max/min/umax/umin.
    return {};
  }
  llvm_unreachable("Unexpected AtomicRMW operation.");
}
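
// Table layout reminder: index 0 is the generic (pointer-based) libcall and
// indices 1-5 are the 1/2/4/8/16-byte sized variants. Only Xchg has a generic
// entry (__atomic_exchange); the fetch_* tables start with UNKNOWN_LIBCALL
// because C provides no generic __atomic_fetch_* call.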
void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
  ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());

  unsigned Size = getAtomicOpSize(I);

  bool Success = false;
  if (!Libcalls.empty())
    Success = expandAtomicOpToLibcall(
        I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
        nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);

  // The expansion failed: either there were no libcalls at all for
  // the operation (min/max), or there were only size-specialized
  // libcalls (add/sub/etc) and we needed a generic. So, expand to a
  // CAS libcall, via a CAS loop, instead.
  if (!Success) {
    expandAtomicRMWToCmpXchg(
        I, [this](IRBuilderBase &Builder, Value *Addr, Value *Loaded,
                  Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
                  SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
          // Create the CAS instruction normally...
          AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
              Addr, Loaded, NewVal, Alignment, MemOpOrder,
              AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
          Success = Builder.CreateExtractValue(Pair, 1, "success");
          NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

          // ...and then expand the CAS into a libcall.
          expandAtomicCASToLibcall(Pair);
        });
  }
}
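
// Rough shape of the fallback path (illustrative IR, names invented): an
// 'atomicrmw max ptr %p, i32 %v' with no fetch_* libcall becomes a CAS loop
// whose body is roughly
//   %cmp = icmp sgt i32 %loaded, %v
//   %new = select i1 %cmp, i32 %loaded, i32 %v
//   %pair = cmpxchg ptr %p, i32 %loaded, i32 %new ...
// and that cmpxchg is then itself lowered via expandAtomicCASToLibcall().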
// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
bool AtomicExpand::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
  assert(Libcalls.size() == 6);

  LLVMContext &Ctx = I->getContext();
  Module *M = I->getModule();
  const DataLayout &DL = M->getDataLayout();
  IRBuilder<> Builder(I);
  IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());

  bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
  Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);

  const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);

  // TODO: the "order" argument type is "int", not int32. So
  // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
  ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
  assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
  Constant *OrderingVal =
      ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
  Constant *Ordering2Val = nullptr;
  if (CASExpected) {
    assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
    Ordering2Val =
        ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
  }
  bool HasResult = I->getType() != Type::getVoidTy(Ctx);
  RTLIB::Libcall RTLibType;
  if (UseSizedLibcall) {
    switch (Size) {
    case 1:
      RTLibType = Libcalls[1];
      break;
    case 2:
      RTLibType = Libcalls[2];
      break;
    case 4:
      RTLibType = Libcalls[3];
      break;
    case 8:
      RTLibType = Libcalls[4];
      break;
    case 16:
      RTLibType = Libcalls[5];
      break;
    }
  } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
    RTLibType = Libcalls[0];
  } else {
    // Can't use sized function, and there's no generic for this
    // operation, so give up.
    return false;
  }

  if (!TLI->getLibcallName(RTLibType)) {
    // This target does not implement the requested atomic libcall so give up.
    return false;
  }

  // Build up the function call. There's two kinds. First, the sized
  // variants. These calls are going to be one of the following (with
  // N=1,2,4,8,16):
  //  iN __atomic_load_N(iN *ptr, int ordering)
  //  void __atomic_store_N(iN *ptr, iN val, int ordering)
  //  iN __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
  //  bool __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
  //                                   int success_order, int failure_order)
  //
  // Note that these functions can be used for non-integer atomic
  // operations, the values just need to be bitcast to integers on the
  // way in and out.
  //
  // And, then, the generic variants. They look like the following:
  //  void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
  //  void __atomic_store(size_t size, void *ptr, void *val, int ordering)
  //  void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
  //                         int ordering)
  //  bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
  //                                 void *desired, int success_order,
  //                                 int failure_order)
  //
  // The different signatures are built up depending on the
  // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
  // variables.

  AllocaInst *AllocaCASExpected = nullptr;
  AllocaInst *AllocaValue = nullptr;
  AllocaInst *AllocaResult = nullptr;

  Type *ResultTy;
  SmallVector<Value *, 6> Args;
  AttributeList Attr;

  // 'size' argument.
  if (!UseSizedLibcall) {
    // Note, getIntPtrType is assumed equivalent to size_t.
    Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
  }

  // 'ptr' argument.
  // note: This assumes all address spaces share a common libfunc
  // implementation and that addresses are convertable. For systems without
  // that property, we'd need to extend this mechanism to support AS-specific
  // families of atomic intrinsics.
  Value *PtrVal = PointerOperand;
  PtrVal = Builder.CreateAddrSpaceCast(PtrVal, PointerType::getUnqual(Ctx));
  Args.push_back(PtrVal);
  // 'expected' argument, if present.
  if (CASExpected) {
    AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
    AllocaCASExpected->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaCASExpected, SizeVal64);
    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
    Args.push_back(AllocaCASExpected);
  }

  // 'val' argument ('desired' for cas), if present.
  if (ValueOperand) {
    if (UseSizedLibcall) {
      Value *IntValue =
          Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
      Args.push_back(IntValue);
    } else {
      AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
      AllocaValue->setAlignment(AllocaAlignment);
      Builder.CreateLifetimeStart(AllocaValue, SizeVal64);
      Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
      Args.push_back(AllocaValue);
    }
  }

  // 'ret' argument.
  if (!CASExpected && HasResult && !UseSizedLibcall) {
    AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
    AllocaResult->setAlignment(AllocaAlignment);
    Builder.CreateLifetimeStart(AllocaResult, SizeVal64);
    Args.push_back(AllocaResult);
  }

  // 'ordering' ('success_order' for cas) argument.
  Args.push_back(OrderingVal);

  // 'failure_order' argument, if present.
  if (Ordering2Val)
    Args.push_back(Ordering2Val);

  // Now, the return type.
  if (CASExpected) {
    ResultTy = Type::getInt1Ty(Ctx);
    Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
  } else if (HasResult && UseSizedLibcall)
    ResultTy = SizedIntTy;
  else
    ResultTy = Type::getVoidTy(Ctx);

  // Done with setting up arguments and return types, create the call:
  SmallVector<Type *, 6> ArgTys;
  for (Value *Arg : Args)
    ArgTys.push_back(Arg->getType());
  FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
  FunctionCallee LibcallFn =
      M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
  CallInst *Call = Builder.CreateCall(LibcallFn, Args);
  Call->setAttributes(Attr);
  Value *Result = Call;

  // And then, extract the results...
  if (ValueOperand && !UseSizedLibcall)
    Builder.CreateLifetimeEnd(AllocaValue, SizeVal64);

  if (CASExpected) {
    // The final result from the CAS is {load of 'expected' alloca, bool result
    // from call}
    Type *FinalResultTy = I->getType();
    Value *V = PoisonValue::get(FinalResultTy);
    Value *ExpectedOut = Builder.CreateAlignedLoad(
        CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
    Builder.CreateLifetimeEnd(AllocaCASExpected, SizeVal64);
    V = Builder.CreateInsertValue(V, ExpectedOut, 0);
    V = Builder.CreateInsertValue(V, Result, 1);
    I->replaceAllUsesWith(V);
  } else if (HasResult) {
    Value *V;
    if (UseSizedLibcall)
      V = Builder.CreateBitOrPointerCast(Result, I->getType());
    else {
      V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
                                    AllocaAlignment);
      Builder.CreateLifetimeEnd(AllocaResult, SizeVal64);
    }
    I->replaceAllUsesWith(V);
  }
  I->eraseFromParent();
  return true;
}