//===- AtomicExpandPass.cpp - Expand atomic instructions -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way which better fits the target backend. This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {
class AtomicExpand: public FunctionPass {
  const TargetLowering *TLI = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  AtomicExpand() : FunctionPass(ID) {
    initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  bool bracketInstWithFences(Instruction *I, AtomicOrdering Order);
  IntegerType *getCorrespondingIntegerType(Type *T, const DataLayout &DL);
  LoadInst *convertAtomicLoadToIntegerType(LoadInst *LI);
  bool tryExpandAtomicLoad(LoadInst *LI);
  bool expandAtomicLoadToLL(LoadInst *LI);
  bool expandAtomicLoadToCmpXchg(LoadInst *LI);
  StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
  bool expandAtomicStore(StoreInst *SI);
  bool tryExpandAtomicRMW(AtomicRMWInst *AI);
  AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
  Value *
  insertRMWLLSCLoop(IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
                    Align AddrAlign, AtomicOrdering MemOpOrder,
                    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandAtomicOpToLLSC(
      Instruction *I, Type *ResultTy, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp);
  void expandPartwordAtomicRMW(
      AtomicRMWInst *I,
      TargetLoweringBase::AtomicExpansionKind ExpansionKind);
  AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
  void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);

  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
  static Value *insertRMWCmpXchgLoop(
      IRBuilder<> &Builder, Type *ResultType, Value *Addr, Align AddrAlign,
      AtomicOrdering MemOpOrder, SyncScope::ID SSID,
      function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
      CreateCmpXchgInstFun CreateCmpXchg);
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);

  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
  bool isIdempotentRMW(AtomicRMWInst *RMWI);
  bool simplifyIdempotentRMW(AtomicRMWInst *RMWI);

  bool expandAtomicOpToLibcall(Instruction *I, unsigned Size, Align Alignment,
                               Value *PointerOperand, Value *ValueOperand,
                               Value *CASExpected, AtomicOrdering Ordering,
                               AtomicOrdering Ordering2,
                               ArrayRef<RTLIB::Libcall> Libcalls);
  void expandAtomicLoadToLibcall(LoadInst *LI);
  void expandAtomicStoreToLibcall(StoreInst *LI);
  void expandAtomicRMWToLibcall(AtomicRMWInst *I);
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);

  friend bool
  llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                 CreateCmpXchgInstFun CreateCmpXchg);
};

} // end anonymous namespace

char AtomicExpand::ID = 0;

char &llvm::AtomicExpandID = AtomicExpand::ID;

INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions",
                false, false)

FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}

static unsigned getAtomicOpSize(StoreInst *SI) {
  const DataLayout &DL = SI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
  const DataLayout &DL = RMWI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
  const DataLayout &DL = CASI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}

// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
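// For example, an i64 atomic load that is only 4-byte aligned, or any atomic
// wider than getMaxAtomicSizeInBitsSupported(), fails this check and is
// routed to the __atomic_* libcalls in runOnFunction() instead of being
// handed to the backend.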
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
bool AtomicExpand::runOnFunction(Function &F) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<TargetMachine>();
  if (!TM.getSubtargetImpl(F)->enableAtomicExpand())
    return false;
  TLI = TM.getSubtargetImpl(F)->getTargetLowering();

  SmallVector<Instruction *, 1> AtomicInsts;

  // Changing control-flow while iterating through it is a bad idea, so gather
  // a list of all atomic instructions before we start.
  for (Instruction &I : instructions(F))
    if (I.isAtomic() && !isa<FenceInst>(&I))
      AtomicInsts.push_back(&I);

  bool MadeChange = false;
  for (auto I : AtomicInsts) {
    auto LI = dyn_cast<LoadInst>(I);
    auto SI = dyn_cast<StoreInst>(I);
    auto RMWI = dyn_cast<AtomicRMWInst>(I);
    auto CASI = dyn_cast<AtomicCmpXchgInst>(I);
    assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");

    // If the Size/Alignment is not supported, replace with a libcall.
    if (LI) {
      if (!atomicSizeSupported(TLI, LI)) {
        expandAtomicLoadToLibcall(LI);
        MadeChange = true;
        continue;
      }
    } else if (SI) {
      if (!atomicSizeSupported(TLI, SI)) {
        expandAtomicStoreToLibcall(SI);
        MadeChange = true;
        continue;
      }
    } else if (RMWI) {
      if (!atomicSizeSupported(TLI, RMWI)) {
        expandAtomicRMWToLibcall(RMWI);
        MadeChange = true;
        continue;
      }
    } else if (CASI) {
      if (!atomicSizeSupported(TLI, CASI)) {
        expandAtomicCASToLibcall(CASI);
        MadeChange = true;
        continue;
      }
    }

    if (TLI->shouldInsertFencesForAtomic(I)) {
      auto FenceOrdering = AtomicOrdering::Monotonic;
      if (LI && isAcquireOrStronger(LI->getOrdering())) {
        FenceOrdering = LI->getOrdering();
        LI->setOrdering(AtomicOrdering::Monotonic);
      } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
        FenceOrdering = SI->getOrdering();
        SI->setOrdering(AtomicOrdering::Monotonic);
      } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
                          isAcquireOrStronger(RMWI->getOrdering()))) {
        FenceOrdering = RMWI->getOrdering();
        RMWI->setOrdering(AtomicOrdering::Monotonic);
      } else if (CASI &&
                 TLI->shouldExpandAtomicCmpXchgInIR(CASI) ==
                     TargetLoweringBase::AtomicExpansionKind::None &&
                 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getSuccessOrdering()) ||
                  isAcquireOrStronger(CASI->getFailureOrdering()))) {
        // If a compare and swap is lowered to LL/SC, we can do smarter fence
        // insertion, with a stronger one on the success path than on the
        // failure path. As a result, fence insertion is directly done by
        // expandAtomicCmpXchg in that case.
        FenceOrdering = CASI->getMergedOrdering();
        CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
        CASI->setFailureOrdering(AtomicOrdering::Monotonic);
      }

      if (FenceOrdering != AtomicOrdering::Monotonic) {
        MadeChange |= bracketInstWithFences(I, FenceOrdering);
      }
    }

    if (LI) {
      if (LI->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        LI = convertAtomicLoadToIntegerType(LI);
        assert(LI->getType()->isIntegerTy() && "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicLoad(LI);
    } else if (SI) {
      if (SI->getValueOperand()->getType()->isFloatingPointTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        SI = convertAtomicStoreToIntegerType(SI);
        assert(SI->getValueOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      if (TLI->shouldExpandAtomicStoreInIR(SI))
        MadeChange |= expandAtomicStore(SI);
    } else if (RMWI) {
      // There are two different ways of expanding RMW instructions:
      // - into a load if it is idempotent
      // - into a Cmpxchg/LL-SC loop otherwise
      // we try them in that order.

      if (isIdempotentRMW(RMWI) && simplifyIdempotentRMW(RMWI)) {
        MadeChange = true;
      } else {
        AtomicRMWInst::BinOp Op = RMWI->getOperation();
        if (Op == AtomicRMWInst::Xchg &&
            RMWI->getValOperand()->getType()->isFloatingPointTy()) {
          // TODO: add a TLI hook to control this so that each target can
          // convert to lowering the original type one at a time.
          RMWI = convertAtomicXchgToIntegerType(RMWI);
          assert(RMWI->getValOperand()->getType()->isIntegerTy() &&
                 "invariant broken");
          MadeChange = true;
        }
        unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
        unsigned ValueSize = getAtomicOpSize(RMWI);
        if (ValueSize < MinCASSize &&
            (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
             Op == AtomicRMWInst::And)) {
          RMWI = widenPartwordAtomicRMW(RMWI);
          MadeChange = true;
        }

        MadeChange |= tryExpandAtomicRMW(RMWI);
      }
    } else if (CASI) {
      // TODO: when we're ready to make the change at the IR level, we can
      // extend convertCmpXchgToInteger for floating point too.
      assert(!CASI->getCompareOperand()->getType()->isFloatingPointTy() &&
             "unimplemented - floating point not legal at IR level");
      if (CASI->getCompareOperand()->getType()->isPointerTy()) {
        // TODO: add a TLI hook to control this so that each target can
        // convert to lowering the original type one at a time.
        CASI = convertCmpXchgToIntegerType(CASI);
        assert(CASI->getCompareOperand()->getType()->isIntegerTy() &&
               "invariant broken");
        MadeChange = true;
      }

      MadeChange |= tryExpandAtomicCmpXchg(CASI);
    }
  }
  return MadeChange;
}
bool AtomicExpand::bracketInstWithFences(Instruction *I, AtomicOrdering Order) {
  IRBuilder<> Builder(I);

  auto LeadingFence = TLI->emitLeadingFence(Builder, I, Order);

  auto TrailingFence = TLI->emitTrailingFence(Builder, I, Order);
  // We have a guard here because not every atomic operation generates a
  // trailing fence.
  if (TrailingFence)
    TrailingFence->moveAfter(I);

  return (LeadingFence || TrailingFence);
}

/// Get the iX type with the same bitwidth as T.
IntegerType *AtomicExpand::getCorrespondingIntegerType(Type *T,
                                                       const DataLayout &DL) {
  EVT VT = TLI->getMemValueType(DL, T);
  unsigned BitWidth = VT.getStoreSizeInBits();
  assert(BitWidth == VT.getSizeInBits() && "must be a power of two");
  return IntegerType::get(T->getContext(), BitWidth);
}
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
  auto *M = LI->getModule();
  Type *NewTy = getCorrespondingIntegerType(LI->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(LI);

  Value *Addr = LI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
  NewLI->setAlignment(LI->getAlign());
  NewLI->setVolatile(LI->isVolatile());
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");

  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}
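/// Convert an atomic xchg of a non-integral operand to an xchg of the integer
/// type with the same bitwidth, mirroring convertAtomicLoadToIntegerType
/// above. In practice runOnFunction() only calls this for floating-point
/// xchg operands today.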
AtomicRMWInst *
AtomicExpand::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {
  auto *M = RMWI->getModule();
  Type *NewTy =
      getCorrespondingIntegerType(RMWI->getType(), M->getDataLayout());

  IRBuilder<> Builder(RMWI);

  Value *Addr = RMWI->getPointerOperand();
  Value *Val = RMWI->getValOperand();
  Type *PT = PointerType::get(NewTy, RMWI->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);
  Value *NewVal = Builder.CreateBitCast(Val, NewTy);

  auto *NewRMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, NewAddr, NewVal,
                              RMWI->getAlign(), RMWI->getOrdering());
  NewRMWI->setVolatile(RMWI->isVolatile());
  LLVM_DEBUG(dbgs() << "Replaced " << *RMWI << " with " << *NewRMWI << "\n");

  Value *NewRVal = Builder.CreateBitCast(NewRMWI, RMWI->getType());
  RMWI->replaceAllUsesWith(NewRVal);
  RMWI->eraseFromParent();
  return NewRMWI;
}
bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
  switch (TLI->shouldExpandAtomicLoadInIR(LI)) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC:
    expandAtomicOpToLLSC(
        LI, LI->getType(), LI->getPointerOperand(), LI->getAlign(),
        LI->getOrdering(),
        [](IRBuilder<> &Builder, Value *Loaded) { return Loaded; });
    return true;
  case TargetLoweringBase::AtomicExpansionKind::LLOnly:
    return expandAtomicLoadToLL(LI);
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg:
    return expandAtomicLoadToCmpXchg(LI);
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
  }
}

bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
  IRBuilder<> Builder(LI);

  // On some architectures, load-linked instructions are atomic for larger
  // sizes than normal loads. For example, the only 64-bit load guaranteed
  // to be single-copy atomic by ARM is an ldrexd (A3.5.3).
  Value *Val = TLI->emitLoadLinked(Builder, LI->getType(),
                                   LI->getPointerOperand(), LI->getOrdering());
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);

  LI->replaceAllUsesWith(Val);
  LI->eraseFromParent();

  return true;
}
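// An atomic load can also be emulated with a cmpxchg of a dummy zero value:
// if the memory happens to contain zero, the cmpxchg stores zero back (no
// change); either way, the "loaded" field of the result is the current value.
// Note that this does turn a pure load into a potential write in the
// expanded form.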
bool AtomicExpand::expandAtomicLoadToCmpXchg(LoadInst *LI) {
  IRBuilder<> Builder(LI);
  AtomicOrdering Order = LI->getOrdering();
  if (Order == AtomicOrdering::Unordered)
    Order = AtomicOrdering::Monotonic;

  Value *Addr = LI->getPointerOperand();
  Type *Ty = LI->getType();
  Constant *DummyVal = Constant::getNullValue(Ty);

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, DummyVal, DummyVal, LI->getAlign(), Order,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Order));
  Value *Loaded = Builder.CreateExtractValue(Pair, 0, "loaded");

  LI->replaceAllUsesWith(Loaded);
  LI->eraseFromParent();

  return true;
}
/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth. We used to not support floating point or vector
/// atomics in the IR at all. The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store. The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
  IRBuilder<> Builder(SI);
  auto *M = SI->getModule();
  Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
                                            M->getDataLayout());
  Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);

  Value *Addr = SI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
  NewSI->setAlignment(SI->getAlign());
  NewSI->setVolatile(SI->isVolatile());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  LLVM_DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
  SI->eraseFromParent();
  return NewSI;
}
bool AtomicExpand::expandAtomicStore(StoreInst *SI) {
  // This function is only called on atomic stores that are too large to be
  // atomic if implemented as a native store. So we replace them by an
  // atomic swap, that can be implemented for example as a ldrex/strex on ARM
  // or lock cmpxchg8/16b on X86, as these are atomic for larger sizes.
  // It is the responsibility of the target to only signal expansion via
  // shouldExpandAtomicRMW in cases where this is required and possible.
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), SI->getOrdering());
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual.
  return tryExpandAtomicRMW(AI);
}

static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Type *OrigTy = NewVal->getType();

  // This code can go away when cmpxchg supports FP types.
  bool NeedBitcast = OrigTy->isFloatingPointTy();
  if (NeedBitcast) {
    IntegerType *IntTy = Builder.getIntNTy(OrigTy->getPrimitiveSizeInBits());
    unsigned AS = Addr->getType()->getPointerAddressSpace();
    Addr = Builder.CreateBitCast(Addr, IntTy->getPointerTo(AS));
    NewVal = Builder.CreateBitCast(NewVal, IntTy);
    Loaded = Builder.CreateBitCast(Loaded, IntTy);
  }

  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");

  if (NeedBitcast)
    NewLoaded = Builder.CreateBitCast(NewLoaded, OrigTy);
}
/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
                              Value *Loaded, Value *Inc) {
  Value *NewVal;
  switch (Op) {
  case AtomicRMWInst::Xchg:
    return Inc;
  case AtomicRMWInst::Add:
    return Builder.CreateAdd(Loaded, Inc, "new");
  case AtomicRMWInst::Sub:
    return Builder.CreateSub(Loaded, Inc, "new");
  case AtomicRMWInst::And:
    return Builder.CreateAnd(Loaded, Inc, "new");
  case AtomicRMWInst::Nand:
    return Builder.CreateNot(Builder.CreateAnd(Loaded, Inc), "new");
  case AtomicRMWInst::Or:
    return Builder.CreateOr(Loaded, Inc, "new");
  case AtomicRMWInst::Xor:
    return Builder.CreateXor(Loaded, Inc, "new");
  case AtomicRMWInst::Max:
    NewVal = Builder.CreateICmpSGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::Min:
    NewVal = Builder.CreateICmpSLE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMax:
    NewVal = Builder.CreateICmpUGT(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::UMin:
    NewVal = Builder.CreateICmpULE(Loaded, Inc);
    return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
  case AtomicRMWInst::FAdd:
    return Builder.CreateFAdd(Loaded, Inc, "new");
  case AtomicRMWInst::FSub:
    return Builder.CreateFSub(Loaded, Inc, "new");
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
bool AtomicExpand::tryExpandAtomicRMW(AtomicRMWInst *AI) {
  LLVMContext &Ctx = AI->getModule()->getContext();
  TargetLowering::AtomicExpansionKind Kind = TLI->shouldExpandAtomicRMWInIR(AI);
  switch (Kind) {
  case TargetLoweringBase::AtomicExpansionKind::None:
    return false;
  case TargetLoweringBase::AtomicExpansionKind::LLSC: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::LLSC);
    } else {
      auto PerformOp = [&](IRBuilder<> &Builder, Value *Loaded) {
        return performAtomicOp(AI->getOperation(), Builder, Loaded,
                               AI->getValOperand());
      };
      expandAtomicOpToLLSC(AI, AI->getType(), AI->getPointerOperand(),
                           AI->getAlign(), AI->getOrdering(), PerformOp);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::CmpXChg: {
    unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
    unsigned ValueSize = getAtomicOpSize(AI);
    if (ValueSize < MinCASSize) {
      // TODO: Handle atomicrmw fadd/fsub
      if (AI->getType()->isFloatingPointTy())
        return false;

      expandPartwordAtomicRMW(AI,
                              TargetLoweringBase::AtomicExpansionKind::CmpXChg);
    } else {
      SmallVector<StringRef> SSNs;
      Ctx.getSyncScopeNames(SSNs);
      auto MemScope = SSNs[AI->getSyncScopeID()].empty()
                          ? "system"
                          : SSNs[AI->getSyncScopeID()];
      OptimizationRemarkEmitter ORE(AI->getFunction());
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "Passed", AI)
               << "A compare and swap loop was generated for an atomic "
               << AI->getOperationName(AI->getOperation()) << " operation at "
               << MemScope << " memory scope";
      });
      expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun);
    }
    return true;
  }
  case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic: {
    expandAtomicRMWToMaskedIntrinsic(AI);
    return true;
  }
  default:
    llvm_unreachable("Unhandled case in tryExpandAtomicRMW");
  }
}
namespace {

struct PartwordMaskValues {
  // These three fields are guaranteed to be set by createMaskInstrs.
  Type *WordType = nullptr;
  Type *ValueType = nullptr;
  Value *AlignedAddr = nullptr;
  Align AlignedAddrAlignment;
  // The remaining fields can be null.
  Value *ShiftAmt = nullptr;
  Value *Mask = nullptr;
  Value *Inv_Mask = nullptr;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {
  auto PrintObj = [&O](auto *V) {
    if (V)
      O << *V;
    else
      O << "nullptr";
    O << '\n';
  };
  O << "PartwordMaskValues {\n";
  O << "  WordType: ";
  PrintObj(PMV.WordType);
  O << "  ValueType: ";
  PrintObj(PMV.ValueType);
  O << "  AlignedAddr: ";
  PrintObj(PMV.AlignedAddr);
  O << "  AlignedAddrAlignment: " << PMV.AlignedAddrAlignment.value() << '\n';
  O << "  ShiftAmt: ";
  PrintObj(PMV.ShiftAmt);
  O << "  Mask: ";
  PrintObj(PMV.Mask);
  O << "  Inv_Mask: ";
  PrintObj(PMV.Inv_Mask);
  O << "}\n";
  return O;
}

} // end anonymous namespace
/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignAddr for it to have the same value as if
///           ValueType was loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
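///
/// Illustrative example (assuming MinWordSize == 4 and a little-endian
/// layout): for an i8 value at an address A with A % 4 == 3,
///   AlignedAddr = A & ~3,  ShiftAmt = 24,
///   Mask = 0xFF << 24 = 0xFF000000,  Inv_Mask = 0x00FFFFFF.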
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
                                           Type *ValueType, Value *Addr,
                                           Align AddrAlign,
                                           unsigned MinWordSize) {
  PartwordMaskValues PMV;

  Module *M = I->getModule();
  LLVMContext &Ctx = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);

  PMV.ValueType = ValueType;
  PMV.WordType = MinWordSize > ValueSize ? Type::getIntNTy(Ctx, MinWordSize * 8)
                                         : ValueType;
  if (PMV.ValueType == PMV.WordType) {
    PMV.AlignedAddr = Addr;
    PMV.AlignedAddrAlignment = AddrAlign;
    PMV.ShiftAmt = ConstantInt::get(PMV.ValueType, 0);
    PMV.Mask = ConstantInt::get(PMV.ValueType, ~0);
    return PMV;
  }

  assert(ValueSize < MinWordSize);

  Type *WordPtrType =
      PMV.WordType->getPointerTo(Addr->getType()->getPointerAddressSpace());

  // TODO: we could skip some of this if AddrAlign >= MinWordSize.
  Value *AddrInt = Builder.CreatePtrToInt(Addr, DL.getIntPtrType(Ctx));
  PMV.AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateAnd(AddrInt, ~(uint64_t)(MinWordSize - 1)), WordPtrType,
      "AlignedAddr");
  PMV.AlignedAddrAlignment = Align(MinWordSize);

  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  if (DL.isLittleEndian()) {
    // turn bytes into bits
    PMV.ShiftAmt = Builder.CreateShl(PtrLSB, 3);
  } else {
    // turn bytes into bits, and count from the other side.
    PMV.ShiftAmt = Builder.CreateShl(
        Builder.CreateXor(PtrLSB, MinWordSize - ValueSize), 3);
  }

  PMV.ShiftAmt = Builder.CreateTrunc(PMV.ShiftAmt, PMV.WordType, "ShiftAmt");
  PMV.Mask = Builder.CreateShl(
      ConstantInt::get(PMV.WordType, (1 << (ValueSize * 8)) - 1), PMV.ShiftAmt,
      "Mask");
  PMV.Inv_Mask = Builder.CreateNot(PMV.Mask, "Inv_Mask");
  return PMV;
}
static Value *extractMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return WideWord;

  Value *Shift = Builder.CreateLShr(WideWord, PMV.ShiftAmt, "shifted");
  Value *Trunc = Builder.CreateTrunc(Shift, PMV.ValueType, "extracted");
  return Trunc;
}

static Value *insertMaskedValue(IRBuilder<> &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {
  assert(WideWord->getType() == PMV.WordType && "Widened type mismatch");
  assert(Updated->getType() == PMV.ValueType && "Value type mismatch");
  if (PMV.WordType == PMV.ValueType)
    return Updated;

  Value *ZExt = Builder.CreateZExt(Updated, PMV.WordType, "extended");
  Value *Shift =
      Builder.CreateShl(ZExt, PMV.ShiftAmt, "shifted", /*HasNUW*/ true);
  Value *And = Builder.CreateAnd(WideWord, PMV.Inv_Mask, "unmasked");
  Value *Or = Builder.CreateOr(And, Shift, "inserted");
  return Or;
}
/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilder<> &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {
  // TODO: update to use
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge in order
  // to merge bits from two values without requiring PMV.Inv_Mask.
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, Shifted_Inc);
    return FinalVal;
  }
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::And:
    llvm_unreachable("Or/Xor/And handled by widenPartwordAtomicRMW");
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Nand: {
    // The other arithmetic ops need to be masked into place.
    Value *NewVal = performAtomicOp(Op, Builder, Loaded, Shifted_Inc);
    Value *NewVal_Masked = Builder.CreateAnd(NewVal, PMV.Mask);
    Value *Loaded_MaskOut = Builder.CreateAnd(Loaded, PMV.Inv_Mask);
    Value *FinalVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Masked);
    return FinalVal;
  }
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin: {
    // Finally, comparison ops will operate on the full value, so
    // truncate down to the original size, and expand out again after
    // doing the operation.
    Value *Loaded_Extract = extractMaskedValue(Builder, Loaded, PMV);
    Value *NewVal = performAtomicOp(Op, Builder, Loaded_Extract, Inc);
    Value *FinalVal = insertMaskedValue(Builder, Loaded, NewVal, PMV);
    return FinalVal;
  }
  default:
    llvm_unreachable("Unknown atomic op");
  }
}
/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside of the loop may operate upon only a
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
  AtomicOrdering MemOpOrder = AI->getOrdering();
  SyncScope::ID SSID = AI->getSyncScopeID();

  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  auto PerformPartwordOp = [&](IRBuilder<> &Builder, Value *Loaded) {
    return performMaskedAtomicOp(AI->getOperation(), Builder, Loaded,
                                 ValOperand_Shifted, AI->getValOperand(), PMV);
  };

  Value *OldResult;
  if (ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg) {
    OldResult = insertRMWCmpXchgLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                     PMV.AlignedAddrAlignment, MemOpOrder,
                                     SSID, PerformPartwordOp,
                                     createCmpXchgInstFun);
  } else {
    assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::LLSC);
    OldResult = insertRMWLLSCLoop(Builder, PMV.WordType, PMV.AlignedAddr,
                                  PMV.AlignedAddrAlignment, MemOpOrder,
                                  PerformPartwordOp);
  }

  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
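// For example, an i8 "or" becomes a word-sized "or" on AlignedAddr with the
// operand zero-extended and shifted into place; for "and", Inv_Mask is OR'd
// into the operand first so the untouched bytes of the word are preserved.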
AtomicRMWInst *AtomicExpand::widenPartwordAtomicRMW(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);
  AtomicRMWInst::BinOp Op = AI->getOperation();

  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
          Op == AtomicRMWInst::And) &&
         "Unable to widen operation");

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *ValOperand_Shifted =
      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), PMV.WordType),
                        PMV.ShiftAmt, "ValOperand_Shifted");

  Value *NewOperand;

  if (Op == AtomicRMWInst::And)
    NewOperand =
        Builder.CreateOr(PMV.Inv_Mask, ValOperand_Shifted, "AndOperand");
  else
    NewOperand = ValOperand_Shifted;

  AtomicRMWInst *NewAI =
      Builder.CreateAtomicRMW(Op, PMV.AlignedAddr, NewOperand,
                              PMV.AlignedAddrAlignment, AI->getOrdering());

  Value *FinalOldResult = extractMaskedValue(Builder, NewAI, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
  return NewAI;
}
bool AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
  // The basic idea here is that we're expanding a cmpxchg of a
  // smaller memory size up to a word-sized cmpxchg. To do this, we
  // need to add a retry-loop for strong cmpxchg, so that
  // modifications to other parts of the word don't cause a spurious
  // failure.

  // This generates code like the following:
  //     [[Setup mask values PMV.*]]
  //     %NewVal_Shifted = shl i32 %NewVal, %PMV.ShiftAmt
  //     %Cmp_Shifted = shl i32 %Cmp, %PMV.ShiftAmt
  //     %InitLoaded = load i32* %addr
  //     %InitLoaded_MaskOut = and i32 %InitLoaded, %PMV.Inv_Mask
  //     br partword.cmpxchg.loop
  // partword.cmpxchg.loop:
  //     %Loaded_MaskOut = phi i32 [ %InitLoaded_MaskOut, %entry ],
  //                               [ %OldVal_MaskOut, %partword.cmpxchg.failure ]
  //     %FullWord_NewVal = or i32 %Loaded_MaskOut, %NewVal_Shifted
  //     %FullWord_Cmp = or i32 %Loaded_MaskOut, %Cmp_Shifted
  //     %NewCI = cmpxchg i32* %PMV.AlignedAddr, i32 %FullWord_Cmp,
  //              i32 %FullWord_NewVal success_ordering failure_ordering
  //     %OldVal = extractvalue { i32, i1 } %NewCI, 0
  //     %Success = extractvalue { i32, i1 } %NewCI, 1
  //     br i1 %Success, label %partword.cmpxchg.end,
  //                     label %partword.cmpxchg.failure
  // partword.cmpxchg.failure:
  //     %OldVal_MaskOut = and i32 %OldVal, %PMV.Inv_Mask
  //     %ShouldContinue = icmp ne i32 %Loaded_MaskOut, %OldVal_MaskOut
  //     br i1 %ShouldContinue, label %partword.cmpxchg.loop,
  //                            label %partword.cmpxchg.end
  // partword.cmpxchg.end:
  //     %tmp1 = lshr i32 %OldVal, %PMV.ShiftAmt
  //     %FinalOldVal = trunc i32 %tmp1 to i8
  //     %tmp2 = insertvalue { i8, i1 } undef, i8 %FinalOldVal, 0
  //     %Res = insertvalue { i8, i1 } %tmp2, i1 %Success, 1

  Value *Addr = CI->getPointerOperand();
  Value *Cmp = CI->getCompareOperand();
  Value *NewVal = CI->getNewValOperand();

  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  IRBuilder<> Builder(CI);
  LLVMContext &Ctx = Builder.getContext();

  BasicBlock *EndBB =
      BB->splitBasicBlock(CI->getIterator(), "partword.cmpxchg.end");
  auto FailureBB =
      BasicBlock::Create(Ctx, "partword.cmpxchg.failure", F, EndBB);
  auto LoopBB = BasicBlock::Create(Ctx, "partword.cmpxchg.loop", F, FailureBB);

  // The split call above "helpfully" added a branch at the end of BB
  // (to the wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // Shift the incoming values over, into the right location in the word.
  Value *NewVal_Shifted =
      Builder.CreateShl(Builder.CreateZExt(NewVal, PMV.WordType), PMV.ShiftAmt);
  Value *Cmp_Shifted =
      Builder.CreateShl(Builder.CreateZExt(Cmp, PMV.WordType), PMV.ShiftAmt);

  // Load the entire current word, and mask into place the expected and new
  // values
  LoadInst *InitLoaded = Builder.CreateLoad(PMV.WordType, PMV.AlignedAddr);
  InitLoaded->setVolatile(CI->isVolatile());
  Value *InitLoaded_MaskOut = Builder.CreateAnd(InitLoaded, PMV.Inv_Mask);
  Builder.CreateBr(LoopBB);

  // partword.cmpxchg.loop:
  Builder.SetInsertPoint(LoopBB);
  PHINode *Loaded_MaskOut = Builder.CreatePHI(PMV.WordType, 2);
  Loaded_MaskOut->addIncoming(InitLoaded_MaskOut, BB);

  // Mask/Or the expected and new values into place in the loaded word.
  Value *FullWord_NewVal = Builder.CreateOr(Loaded_MaskOut, NewVal_Shifted);
  Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
  AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
      PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, PMV.AlignedAddrAlignment,
      CI->getSuccessOrdering(), CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  // When we're building a strong cmpxchg, we need a loop, so you
  // might think we could use a weak cmpxchg inside. But, using strong
  // allows the below comparison for ShouldContinue, and we're
  // expecting the underlying cmpxchg to be a machine instruction,
  // which is strong anyways.
  NewCI->setWeak(CI->isWeak());

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Success = Builder.CreateExtractValue(NewCI, 1);

  if (CI->isWeak())
    Builder.CreateBr(EndBB);
  else
    Builder.CreateCondBr(Success, EndBB, FailureBB);

  // partword.cmpxchg.failure:
  Builder.SetInsertPoint(FailureBB);
  // Upon failure, verify that the masked-out part of the loaded value
  // has been modified. If it didn't, abort the cmpxchg, since the
  // masked-in part must've.
  Value *OldVal_MaskOut = Builder.CreateAnd(OldVal, PMV.Inv_Mask);
  Value *ShouldContinue = Builder.CreateICmpNE(Loaded_MaskOut, OldVal_MaskOut);
  Builder.CreateCondBr(ShouldContinue, LoopBB, EndBB);

  // Add the second value to the phi from above
  Loaded_MaskOut->addIncoming(OldVal_MaskOut, FailureBB);

  // partword.cmpxchg.end:
  Builder.SetInsertPoint(CI);

  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return true;
}
void AtomicExpand::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  IRBuilder<> Builder(I);
  Value *Loaded = insertRMWLLSCLoop(Builder, ResultType, Addr, AddrAlign,
                                    MemOpOrder, PerformOp);

  I->replaceAllUsesWith(Loaded);
  I->eraseFromParent();
}
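// Lower a part-word atomicrmw to a target-provided masked intrinsic (via
// emitMaskedAtomicRMWIntrinsic): the value operand is extended (sign-extended
// for signed min/max so the target can compare correctly, zero-extended
// otherwise), shifted into position, and the intrinsic is expected to update
// only the bits under Mask.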
void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
  IRBuilder<> Builder(AI);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, AI, AI->getType(), AI->getPointerOperand(),
                       AI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  // The value operand must be sign-extended for signed min/max so that the
  // target's signed comparison instructions can be used. Otherwise, just
  // zero-ext.
  Instruction::CastOps CastOp = Instruction::ZExt;
  AtomicRMWInst::BinOp RMWOp = AI->getOperation();
  if (RMWOp == AtomicRMWInst::Max || RMWOp == AtomicRMWInst::Min)
    CastOp = Instruction::SExt;

  Value *ValOperand_Shifted = Builder.CreateShl(
      Builder.CreateCast(CastOp, AI->getValOperand(), PMV.WordType),
      PMV.ShiftAmt, "ValOperand_Shifted");
  Value *OldResult = TLI->emitMaskedAtomicRMWIntrinsic(
      Builder, AI, PMV.AlignedAddr, ValOperand_Shifted, PMV.Mask, PMV.ShiftAmt,
      AI->getOrdering());
  Value *FinalOldResult = extractMaskedValue(Builder, OldResult, PMV);
  AI->replaceAllUsesWith(FinalOldResult);
  AI->eraseFromParent();
}
void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
  IRBuilder<> Builder(CI);

  PartwordMaskValues PMV = createMaskInstrs(
      Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
      CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);

  Value *CmpVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
      "CmpVal_Shifted");
  Value *NewVal_Shifted = Builder.CreateShl(
      Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
      "NewVal_Shifted");
  Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
      Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
      CI->getMergedOrdering());
  Value *FinalOldVal = extractMaskedValue(Builder, OldVal, PMV);
  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
  Value *Success = Builder.CreateICmpEQ(
      CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
  Res = Builder.CreateInsertValue(Res, Success, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
}
Value *AtomicExpand::insertRMWLLSCLoop(
    IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilder<> &, Value *)> PerformOp) {
  LLVMContext &Ctx = Builder.getContext();
  BasicBlock *BB = Builder.GetInsertBlock();
  Function *F = BB->getParent();

  assert(AddrAlign >=
             F->getParent()->getDataLayout().getTypeStoreSize(ResultTy) &&
         "Expected at least natural alignment at this point.");

  // Given: atomicrmw some_op iN* %addr, iN %incr ordering
  //
  // The standard expansion we produce is:
  //     [...]
  // atomicrmw.start:
  //     %loaded = @load.linked(%addr)
  //     %new = some_op iN %loaded, %incr
  //     %stored = @store_conditional(%new, %addr)
  //     %try_again = icmp i32 ne %stored, 0
  //     br i1 %try_again, label %loop, label %atomicrmw.end
  // atomicrmw.end:
  //     [...]
  BasicBlock *ExitBB =
      BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place).
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  Builder.CreateBr(LoopBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(LoopBB);
  Value *Loaded = TLI->emitLoadLinked(Builder, ResultTy, Addr, MemOpOrder);

  Value *NewVal = PerformOp(Builder, Loaded);

  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewVal, Addr, MemOpOrder);
  Value *TryAgain = Builder.CreateICmpNE(
      StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
  Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);

  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  return Loaded;
}
/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
  auto *M = CI->getModule();
  Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
                                            M->getDataLayout());

  IRBuilder<> Builder(CI);

  Value *Addr = CI->getPointerOperand();
  Type *PT = PointerType::get(NewTy,
                              Addr->getType()->getPointerAddressSpace());
  Value *NewAddr = Builder.CreateBitCast(Addr, PT);

  Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
  Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);

  auto *NewCI = Builder.CreateAtomicCmpXchg(
      NewAddr, NewCmp, NewNewVal, CI->getAlign(), CI->getSuccessOrdering(),
      CI->getFailureOrdering(), CI->getSyncScopeID());
  NewCI->setVolatile(CI->isVolatile());
  NewCI->setWeak(CI->isWeak());
  LLVM_DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");

  Value *OldVal = Builder.CreateExtractValue(NewCI, 0);
  Value *Succ = Builder.CreateExtractValue(NewCI, 1);

  OldVal = Builder.CreateIntToPtr(OldVal, CI->getCompareOperand()->getType());

  Value *Res = UndefValue::get(CI->getType());
  Res = Builder.CreateInsertValue(Res, OldVal, 0);
  Res = Builder.CreateInsertValue(Res, Succ, 1);

  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();
  return NewCI;
}
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
  AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
  AtomicOrdering FailureOrder = CI->getFailureOrdering();
  Value *Addr = CI->getPointerOperand();
  BasicBlock *BB = CI->getParent();
  Function *F = BB->getParent();
  LLVMContext &Ctx = F->getContext();
  // If shouldInsertFencesForAtomic() returns true, then the target does not
  // want to deal with memory orders, and emitLeading/TrailingFence should take
  // care of everything. Otherwise, emitLeading/TrailingFence are no-op and we
  // should preserve the ordering.
  bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
  AtomicOrdering MemOpOrder = ShouldInsertFencesForAtomic
                                  ? AtomicOrdering::Monotonic
                                  : CI->getMergedOrdering();

  // In implementations which use a barrier to achieve release semantics, we
  // can delay emitting this barrier until we know a store is actually going to
  // be attempted. The cost of this delay is that we need 2 copies of the block
  // emitting the load-linked, affecting code size.
  //
  // Ideally, this logic would be unconditional except for the minsize check
  // since in other cases the extra blocks naturally collapse down to the
  // minimal loop. Unfortunately, this puts too much stress on later
  // optimisations so we avoid emitting the extra logic in those cases too.
  bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
                           SuccessOrder != AtomicOrdering::Monotonic &&
                           SuccessOrder != AtomicOrdering::Acquire &&
                           !F->hasMinSize();

  // There's no overhead for sinking the release barrier in a weak cmpxchg, so
  // do it even on minsize.
  bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak();

  // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord
  //
  // The full expansion we produce is:
  //     [...]
  //     %aligned.addr = ...
  // cmpxchg.start:
  //     %unreleasedload = @load.linked(%aligned.addr)
  //     %unreleasedload.extract = extract value from %unreleasedload
  //     %should_store = icmp eq %unreleasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.releasingstore,
  //                          label %cmpxchg.nostore
  // cmpxchg.releasingstore:
  //     fence?
  //     br label cmpxchg.trystore
  // cmpxchg.trystore:
  //     %loaded.trystore = phi [%unreleasedload, %cmpxchg.releasingstore],
  //                            [%releasedload, %cmpxchg.releasedload]
  //     %updated.new = insert %new into %loaded.trystore
  //     %stored = @store_conditional(%updated.new, %aligned.addr)
  //     %success = icmp eq i32 %stored, 0
  //     br i1 %success, label %cmpxchg.success,
  //                     label %cmpxchg.releasedload/%cmpxchg.failure
  // cmpxchg.releasedload:
  //     %releasedload = @load.linked(%aligned.addr)
  //     %releasedload.extract = extract value from %releasedload
  //     %should_store = icmp eq %releasedload.extract, %desired
  //     br i1 %should_store, label %cmpxchg.trystore,
  //                          label %cmpxchg.failure
  // cmpxchg.success:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.nostore:
  //     %loaded.nostore = phi [%unreleasedload, %cmpxchg.start],
  //                           [%releasedload,
  //                               %cmpxchg.releasedload/%cmpxchg.trystore]
  //     @load_linked_fail_balance()?
  //     br label %cmpxchg.failure
  // cmpxchg.failure:
  //     fence?
  //     br label %cmpxchg.end
  // cmpxchg.end:
  //     %loaded.exit = phi [%loaded.nostore, %cmpxchg.failure],
  //                        [%loaded.trystore, %cmpxchg.trystore]
  //     %success = phi i1 [true, %cmpxchg.success], [false, %cmpxchg.failure]
  //     %loaded = extract value from %loaded.exit
  //     %restmp = insertvalue { iN, i1 } undef, iN %loaded, 0
  //     %res = insertvalue { iN, i1 } %restmp, i1 %success, 1
  //     [...]
  BasicBlock *ExitBB = BB->splitBasicBlock(CI->getIterator(), "cmpxchg.end");
  auto FailureBB = BasicBlock::Create(Ctx, "cmpxchg.failure", F, ExitBB);
  auto NoStoreBB = BasicBlock::Create(Ctx, "cmpxchg.nostore", F, FailureBB);
  auto SuccessBB = BasicBlock::Create(Ctx, "cmpxchg.success", F, NoStoreBB);
  auto ReleasedLoadBB =
      BasicBlock::Create(Ctx, "cmpxchg.releasedload", F, SuccessBB);
  auto TryStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.trystore", F, ReleasedLoadBB);
  auto ReleasingStoreBB =
      BasicBlock::Create(Ctx, "cmpxchg.fencedstore", F, TryStoreBB);
  auto StartBB = BasicBlock::Create(Ctx, "cmpxchg.start", F, ReleasingStoreBB);

  // This grabs the DebugLoc from CI
  IRBuilder<> Builder(CI);

  // The split call above "helpfully" added a branch at the end of BB (to the
  // wrong place), but we might want a fence too. It's easiest to just remove
  // the branch entirely.
  std::prev(BB->end())->eraseFromParent();
  Builder.SetInsertPoint(BB);
  if (ShouldInsertFencesForAtomic && UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);

  PartwordMaskValues PMV =
      createMaskInstrs(Builder, CI, CI->getCompareOperand()->getType(), Addr,
                       CI->getAlign(), TLI->getMinCmpXchgSizeInBits() / 8);
  Builder.CreateBr(StartBB);

  // Start the main loop block now that we've taken care of the preliminaries.
  Builder.SetInsertPoint(StartBB);
  Value *UnreleasedLoad =
      TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
  Value *UnreleasedLoadExtract =
      extractMaskedValue(Builder, UnreleasedLoad, PMV);
  Value *ShouldStore = Builder.CreateICmpEQ(
      UnreleasedLoadExtract, CI->getCompareOperand(), "should_store");

  // If the cmpxchg doesn't actually need any ordering when it fails, we can
  // jump straight past that fence instruction (if it exists).
  Builder.CreateCondBr(ShouldStore, ReleasingStoreBB, NoStoreBB);

  Builder.SetInsertPoint(ReleasingStoreBB);
  if (ShouldInsertFencesForAtomic && !UseUnconditionalReleaseBarrier)
    TLI->emitLeadingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(TryStoreBB);

  Builder.SetInsertPoint(TryStoreBB);
  PHINode *LoadedTryStore =
      Builder.CreatePHI(PMV.WordType, 2, "loaded.trystore");
  LoadedTryStore->addIncoming(UnreleasedLoad, ReleasingStoreBB);
  Value *NewValueInsert =
      insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
  Value *StoreSuccess =
      TLI->emitStoreConditional(Builder, NewValueInsert, PMV.AlignedAddr,
                                MemOpOrder);
  StoreSuccess = Builder.CreateICmpEQ(
      StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
  BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
  Builder.CreateCondBr(StoreSuccess, SuccessBB,
                       CI->isWeak() ? FailureBB : RetryBB);

  Builder.SetInsertPoint(ReleasedLoadBB);
  Value *SecondLoad;
  if (HasReleasedLoadBB) {
    SecondLoad =
        TLI->emitLoadLinked(Builder, PMV.WordType, PMV.AlignedAddr, MemOpOrder);
    Value *SecondLoadExtract = extractMaskedValue(Builder, SecondLoad, PMV);
    ShouldStore = Builder.CreateICmpEQ(SecondLoadExtract,
                                       CI->getCompareOperand(), "should_store");

    // If the cmpxchg doesn't actually need any ordering when it fails, we can
    // jump straight past that fence instruction (if it exists).
    Builder.CreateCondBr(ShouldStore, TryStoreBB, NoStoreBB);
    // Update PHI node in TryStoreBB.
    LoadedTryStore->addIncoming(SecondLoad, ReleasedLoadBB);
  } else
    Builder.CreateUnreachable();

  // Make sure later instructions don't get reordered with a fence if
  // necessary.
  Builder.SetInsertPoint(SuccessBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, SuccessOrder);
  Builder.CreateBr(ExitBB);

  Builder.SetInsertPoint(NoStoreBB);
  PHINode *LoadedNoStore =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.nostore");
  LoadedNoStore->addIncoming(UnreleasedLoad, StartBB);
  if (HasReleasedLoadBB)
    LoadedNoStore->addIncoming(SecondLoad, ReleasedLoadBB);

  // In the failing case, where we don't execute the store-conditional, the
  // target might want to balance out the load-linked with a dedicated
  // instruction (e.g., on ARM, clearing the exclusive monitor).
  TLI->emitAtomicCmpXchgNoStoreLLBalance(Builder);
  Builder.CreateBr(FailureBB);

  Builder.SetInsertPoint(FailureBB);
  PHINode *LoadedFailure =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.failure");
  LoadedFailure->addIncoming(LoadedNoStore, NoStoreBB);
  if (CI->isWeak())
    LoadedFailure->addIncoming(LoadedTryStore, TryStoreBB);
  if (ShouldInsertFencesForAtomic)
    TLI->emitTrailingFence(Builder, CI, FailureOrder);
  Builder.CreateBr(ExitBB);

  // Finally, we have control-flow based knowledge of whether the cmpxchg
  // succeeded or not. We expose this to later passes by converting any
  // subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
  // PHI.
  Builder.SetInsertPoint(ExitBB, ExitBB->begin());
  PHINode *LoadedExit =
      Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
  LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
  LoadedExit->addIncoming(LoadedFailure, FailureBB);
  PHINode *Success = Builder.CreatePHI(Type::getInt1Ty(Ctx), 2, "success");
  Success->addIncoming(ConstantInt::getTrue(Ctx), SuccessBB);
  Success->addIncoming(ConstantInt::getFalse(Ctx), FailureBB);

  // This is the "exit value" from the cmpxchg expansion. It may be of
  // a type wider than the one in the cmpxchg instruction.
  Value *LoadedFull = LoadedExit;

  Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
  Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);

  // Look for any users of the cmpxchg that are just comparing the loaded value
  // against the desired one, and replace them with the CFG-derived version.
  SmallVector<ExtractValueInst *, 2> PrunedInsts;
  for (auto User : CI->users()) {
    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
    if (!EV)
      continue;

    assert(EV->getNumIndices() == 1 && EV->getIndices()[0] <= 1 &&
           "weird extraction from { iN, i1 }");

    if (EV->getIndices()[0] == 0)
      EV->replaceAllUsesWith(Loaded);
    else
      EV->replaceAllUsesWith(Success);

    PrunedInsts.push_back(EV);
  }

  // We can remove the instructions now we're no longer iterating through them.
  for (auto EV : PrunedInsts)
    EV->eraseFromParent();

  if (!CI->use_empty()) {
    // Some use of the full struct return that we don't understand has
    // happened, so we've got to reconstruct it properly.
    Value *Res;
    Res = Builder.CreateInsertValue(UndefValue::get(CI->getType()), Loaded, 0);
    Res = Builder.CreateInsertValue(Res, Success, 1);

    CI->replaceAllUsesWith(Res);
  }

  CI->eraseFromParent();
  return true;
}
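// An atomicrmw is "idempotent" when the operation cannot change the stored
// value, e.g. "add 0", "sub 0", "or 0", "xor 0" or "and -1". Such an
// instruction only needs to produce the loaded value, so the target may be
// able to lower it to a fenced load (see simplifyIdempotentRMW below).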
bool AtomicExpand::isIdempotentRMW(AtomicRMWInst *RMWI) {
  auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;

  AtomicRMWInst::BinOp Op = RMWI->getOperation();
  switch (Op) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();
  case AtomicRMWInst::And:
    return C->isMinusOne();
  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
  default:
    return false;
  }
}

bool AtomicExpand::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {
  if (auto ResultingLoad = TLI->lowerIdempotentRMWIntoFencedLoad(RMWI)) {
    tryExpandAtomicLoad(ResultingLoad);
    return true;
  }
  return false;
}
1449 Value *AtomicExpand::insertRMWCmpXchgLoop(
1450 IRBuilder<> &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
1451 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
1452 function_ref<Value *(IRBuilder<> &, Value *)> PerformOp,
1453 CreateCmpXchgInstFun CreateCmpXchg) {
1454 LLVMContext &Ctx = Builder.getContext();
1455 BasicBlock *BB = Builder.GetInsertBlock();
1456 Function *F = BB->getParent();
1458 // Given: atomicrmw some_op iN* %addr, iN %incr ordering
1460 // The standard expansion we produce is:
1461 // [...]
1462 // %init_loaded = load atomic iN* %addr
1463 // br label %loop
1464 // loop:
1465 // %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
1466 // %new = some_op iN %loaded, %incr
1467 // %pair = cmpxchg iN* %addr, iN %loaded, iN %new
1468 // %new_loaded = extractvalue { iN, i1 } %pair, 0
1469 // %success = extractvalue { iN, i1 } %pair, 1
1470 // br i1 %success, label %atomicrmw.end, label %loop
1471 // atomicrmw.end:
1472 // [...]
1473 BasicBlock *ExitBB =
1474 BB->splitBasicBlock(Builder.GetInsertPoint(), "atomicrmw.end");
1475 BasicBlock *LoopBB = BasicBlock::Create(Ctx, "atomicrmw.start", F, ExitBB);
1477 // The split call above "helpfully" added a branch at the end of BB (to the
1478 // wrong place), but we want a load. It's easiest to just remove
1479 // the branch entirely.
1480 std::prev(BB->end())->eraseFromParent();
1481 Builder.SetInsertPoint(BB);
1482 LoadInst *InitLoaded = Builder.CreateAlignedLoad(ResultTy, Addr, AddrAlign);
1483 Builder.CreateBr(LoopBB);
1485 // Start the main loop block now that we've taken care of the preliminaries.
1486 Builder.SetInsertPoint(LoopBB);
1487 PHINode *Loaded = Builder.CreatePHI(ResultTy, 2, "loaded");
1488 Loaded->addIncoming(InitLoaded, BB);
1490 Value *NewVal = PerformOp(Builder, Loaded);
1492 Value *NewLoaded = nullptr;
1493 Value *Success = nullptr;
1495 CreateCmpXchg(Builder, Addr, Loaded, NewVal, AddrAlign,
1496 MemOpOrder == AtomicOrdering::Unordered
1497 ? AtomicOrdering::Monotonic
1498 : MemOpOrder,
1499 SSID, Success, NewLoaded);
1500 assert(Success && NewLoaded);
1502 Loaded->addIncoming(NewLoaded, LoopBB);
1504 Builder.CreateCondBr(Success, ExitBB, LoopBB);
1506 Builder.SetInsertPoint(ExitBB, ExitBB->begin());
1507 return NewLoaded;
1508 }
1510 bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
1511 unsigned MinCASSize = TLI->getMinCmpXchgSizeInBits() / 8;
1512 unsigned ValueSize = getAtomicOpSize(CI);
1514 switch (TLI->shouldExpandAtomicCmpXchgInIR(CI)) {
1515 default:
1516 llvm_unreachable("Unhandled case in tryExpandAtomicCmpXchg");
1517 case TargetLoweringBase::AtomicExpansionKind::None:
1518 if (ValueSize < MinCASSize)
1519 return expandPartwordCmpXchg(CI);
1520 return false;
1521 case TargetLoweringBase::AtomicExpansionKind::LLSC: {
1522 return expandAtomicCmpXchg(CI);
1523 }
1524 case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
1525 expandAtomicCmpXchgToMaskedIntrinsic(CI);
1526 return true;
1527 }
1528 }
1530 // Note: This function is exposed externally by AtomicExpandUtils.h
1531 bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
1532 CreateCmpXchgInstFun CreateCmpXchg) {
1533 IRBuilder<> Builder(AI);
1534 Value *Loaded = AtomicExpand::insertRMWCmpXchgLoop(
1535 Builder, AI->getType(), AI->getPointerOperand(), AI->getAlign(),
1536 AI->getOrdering(), AI->getSyncScopeID(),
1537 [&](IRBuilder<> &Builder, Value *Loaded) {
1538 return performAtomicOp(AI->getOperation(), Builder, Loaded,
1539 AI->getValOperand());
1540 },
1541 CreateCmpXchg);
1543 AI->replaceAllUsesWith(Loaded);
1544 AI->eraseFromParent();
1545 return true;
1546 }
1548 // In order to use one of the sized library calls such as
1549 // __atomic_fetch_add_4, the alignment must be sufficient, the size
1550 // must be one of the potentially-specialized sizes, and the value
1551 // type must actually exist in C on the target (otherwise, the
1552 // function wouldn't actually be defined.)
1553 static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
1554 const DataLayout &DL) {
1555 // TODO: "LargestSize" is an approximation for "largest type that
1556 // you can express in C". It seems to be the case that int128 is
1557 // supported on all 64-bit platforms; otherwise only up to 64-bit
1558 // integers are supported. If we get this wrong, then we'll try to
1559 // call a sized libcall that doesn't actually exist. There should
1560 // really be some more reliable way in LLVM of determining integer
1561 // sizes which are valid in the target's C ABI...
1562 unsigned LargestSize = DL.getLargestLegalIntTypeSizeInBits() >= 64 ? 16 : 8;
1563 return Alignment >= Size &&
1564 (Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16) &&
1565 Size <= LargestSize;
1566 }
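// Example (illustrative): an i32 access with 4-byte alignment on a typical
// 64-bit target passes this check and can use e.g. __atomic_fetch_add_4,
// whereas an i64 access that is only 4-byte aligned fails Alignment >= Size
// and must fall back to the generic (unsized) __atomic_* libcall instead.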
1568 void AtomicExpand::expandAtomicLoadToLibcall(LoadInst *I) {
1569 static const RTLIB::Libcall Libcalls[6] = {
1570 RTLIB::ATOMIC_LOAD, RTLIB::ATOMIC_LOAD_1, RTLIB::ATOMIC_LOAD_2,
1571 RTLIB::ATOMIC_LOAD_4, RTLIB::ATOMIC_LOAD_8, RTLIB::ATOMIC_LOAD_16};
1572 unsigned Size = getAtomicOpSize(I);
1574 bool expanded = expandAtomicOpToLibcall(
1575 I, Size, I->getAlign(), I->getPointerOperand(), nullptr, nullptr,
1576 I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1577 if (!expanded)
1578 report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Load");
1579 }
1581 void AtomicExpand::expandAtomicStoreToLibcall(StoreInst *I) {
1582 static const RTLIB::Libcall Libcalls[6] = {
1583 RTLIB::ATOMIC_STORE, RTLIB::ATOMIC_STORE_1, RTLIB::ATOMIC_STORE_2,
1584 RTLIB::ATOMIC_STORE_4, RTLIB::ATOMIC_STORE_8, RTLIB::ATOMIC_STORE_16};
1585 unsigned Size = getAtomicOpSize(I);
1587 bool expanded = expandAtomicOpToLibcall(
1588 I, Size, I->getAlign(), I->getPointerOperand(), I->getValueOperand(),
1589 nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1590 if (!expanded)
1591 report_fatal_error("expandAtomicOpToLibcall shouldn't fail for Store");
1592 }
1594 void AtomicExpand::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {
1595 static const RTLIB::Libcall Libcalls[6] = {
1596 RTLIB::ATOMIC_COMPARE_EXCHANGE, RTLIB::ATOMIC_COMPARE_EXCHANGE_1,
1597 RTLIB::ATOMIC_COMPARE_EXCHANGE_2, RTLIB::ATOMIC_COMPARE_EXCHANGE_4,
1598 RTLIB::ATOMIC_COMPARE_EXCHANGE_8, RTLIB::ATOMIC_COMPARE_EXCHANGE_16};
1599 unsigned Size = getAtomicOpSize(I);
1601 bool expanded = expandAtomicOpToLibcall(
1602 I, Size, I->getAlign(), I->getPointerOperand(), I->getNewValOperand(),
1603 I->getCompareOperand(), I->getSuccessOrdering(), I->getFailureOrdering(),
1604 Libcalls);
1605 if (!expanded)
1606 report_fatal_error("expandAtomicOpToLibcall shouldn't fail for CAS");
1607 }
1609 static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
1610 static const RTLIB::Libcall LibcallsXchg[6] = {
1611 RTLIB::ATOMIC_EXCHANGE, RTLIB::ATOMIC_EXCHANGE_1,
1612 RTLIB::ATOMIC_EXCHANGE_2, RTLIB::ATOMIC_EXCHANGE_4,
1613 RTLIB::ATOMIC_EXCHANGE_8, RTLIB::ATOMIC_EXCHANGE_16};
1614 static const RTLIB::Libcall LibcallsAdd[6] = {
1615 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_ADD_1,
1616 RTLIB::ATOMIC_FETCH_ADD_2, RTLIB::ATOMIC_FETCH_ADD_4,
1617 RTLIB::ATOMIC_FETCH_ADD_8, RTLIB::ATOMIC_FETCH_ADD_16};
1618 static const RTLIB::Libcall LibcallsSub[6] = {
1619 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_SUB_1,
1620 RTLIB::ATOMIC_FETCH_SUB_2, RTLIB::ATOMIC_FETCH_SUB_4,
1621 RTLIB::ATOMIC_FETCH_SUB_8, RTLIB::ATOMIC_FETCH_SUB_16};
1622 static const RTLIB::Libcall LibcallsAnd[6] = {
1623 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_AND_1,
1624 RTLIB::ATOMIC_FETCH_AND_2, RTLIB::ATOMIC_FETCH_AND_4,
1625 RTLIB::ATOMIC_FETCH_AND_8, RTLIB::ATOMIC_FETCH_AND_16};
1626 static const RTLIB::Libcall LibcallsOr[6] = {
1627 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_OR_1,
1628 RTLIB::ATOMIC_FETCH_OR_2, RTLIB::ATOMIC_FETCH_OR_4,
1629 RTLIB::ATOMIC_FETCH_OR_8, RTLIB::ATOMIC_FETCH_OR_16};
1630 static const RTLIB::Libcall LibcallsXor[6] = {
1631 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_XOR_1,
1632 RTLIB::ATOMIC_FETCH_XOR_2, RTLIB::ATOMIC_FETCH_XOR_4,
1633 RTLIB::ATOMIC_FETCH_XOR_8, RTLIB::ATOMIC_FETCH_XOR_16};
1634 static const RTLIB::Libcall LibcallsNand[6] = {
1635 RTLIB::UNKNOWN_LIBCALL, RTLIB::ATOMIC_FETCH_NAND_1,
1636 RTLIB::ATOMIC_FETCH_NAND_2, RTLIB::ATOMIC_FETCH_NAND_4,
1637 RTLIB::ATOMIC_FETCH_NAND_8, RTLIB::ATOMIC_FETCH_NAND_16};
1639 switch (Op) {
1640 case AtomicRMWInst::BAD_BINOP:
1641 llvm_unreachable("Should not have BAD_BINOP.");
1642 case AtomicRMWInst::Xchg:
1643 return makeArrayRef(LibcallsXchg);
1644 case AtomicRMWInst::Add:
1645 return makeArrayRef(LibcallsAdd);
1646 case AtomicRMWInst::Sub:
1647 return makeArrayRef(LibcallsSub);
1648 case AtomicRMWInst::And:
1649 return makeArrayRef(LibcallsAnd);
1650 case AtomicRMWInst::Or:
1651 return makeArrayRef(LibcallsOr);
1652 case AtomicRMWInst::Xor:
1653 return makeArrayRef(LibcallsXor);
1654 case AtomicRMWInst::Nand:
1655 return makeArrayRef(LibcallsNand);
1656 case AtomicRMWInst::Max:
1657 case AtomicRMWInst::Min:
1658 case AtomicRMWInst::UMax:
1659 case AtomicRMWInst::UMin:
1660 case AtomicRMWInst::FAdd:
1661 case AtomicRMWInst::FSub:
1662 // No atomic libcalls are available for max/min/umax/umin or the FP ops.
1663 return {};
1664 }
1665 llvm_unreachable("Unexpected AtomicRMW operation.");
1666 }
1668 void AtomicExpand::expandAtomicRMWToLibcall(AtomicRMWInst *I) {
1669 ArrayRef<RTLIB::Libcall> Libcalls = GetRMWLibcall(I->getOperation());
1671 unsigned Size = getAtomicOpSize(I);
1673 bool Success = false;
1674 if (!Libcalls.empty())
1675 Success = expandAtomicOpToLibcall(
1676 I, Size, I->getAlign(), I->getPointerOperand(), I->getValOperand(),
1677 nullptr, I->getOrdering(), AtomicOrdering::NotAtomic, Libcalls);
1679 // The expansion failed: either there were no libcalls at all for
1680 // the operation (min/max), or there were only size-specialized
1681 // libcalls (add/sub/etc) and we needed a generic. So, expand to a
1682 // CAS libcall, via a CAS loop, instead.
1683 if (!Success) {
1684 expandAtomicRMWToCmpXchg(
1685 I, [this](IRBuilder<> &Builder, Value *Addr, Value *Loaded,
1686 Value *NewVal, Align Alignment, AtomicOrdering MemOpOrder,
1687 SyncScope::ID SSID, Value *&Success, Value *&NewLoaded) {
1688 // Create the CAS instruction normally...
1689 AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
1690 Addr, Loaded, NewVal, Alignment, MemOpOrder,
1691 AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
1692 Success = Builder.CreateExtractValue(Pair, 1, "success");
1693 NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
1695 // ...and then expand the CAS into a libcall.
1696 expandAtomicCASToLibcall(Pair);
1697 });
1698 }
1699 }
1701 // A helper routine for the above expandAtomic*ToLibcall functions.
1703 // 'Libcalls' contains an array of enum values for the particular
1704 // ATOMIC libcalls to be emitted. All of the other arguments besides
1705 // 'I' are extracted from the Instruction subclass by the
1706 // caller. Depending on the particular call, some will be null.
1707 bool AtomicExpand::expandAtomicOpToLibcall(
1708 Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
1709 Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
1710 AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {
1711 assert(Libcalls.size() == 6);
1713 LLVMContext &Ctx = I->getContext();
1714 Module *M = I->getModule();
1715 const DataLayout &DL = M->getDataLayout();
1716 IRBuilder<> Builder(I);
1717 IRBuilder<> AllocaBuilder(&I->getFunction()->getEntryBlock().front());
1719 bool UseSizedLibcall = canUseSizedAtomicCall(Size, Alignment, DL);
1720 Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);
1722 const Align AllocaAlignment = DL.getPrefTypeAlign(SizedIntTy);
1724 // TODO: the "order" argument type is "int", not int32. So
1725 // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
1726 ConstantInt *SizeVal64 = ConstantInt::get(Type::getInt64Ty(Ctx), Size);
1727 assert(Ordering != AtomicOrdering::NotAtomic && "expect atomic MO");
1728 Constant *OrderingVal =
1729 ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering));
1730 Constant *Ordering2Val = nullptr;
1731 if (CASExpected) {
1732 assert(Ordering2 != AtomicOrdering::NotAtomic && "expect atomic MO");
1733 Ordering2Val =
1734 ConstantInt::get(Type::getInt32Ty(Ctx), (int)toCABI(Ordering2));
1735 }
1736 bool HasResult = I->getType() != Type::getVoidTy(Ctx);
1738 RTLIB::Libcall RTLibType;
1739 if (UseSizedLibcall) {
1740 switch (Size) {
1741 case 1: RTLibType = Libcalls[1]; break;
1742 case 2: RTLibType = Libcalls[2]; break;
1743 case 4: RTLibType = Libcalls[3]; break;
1744 case 8: RTLibType = Libcalls[4]; break;
1745 case 16: RTLibType = Libcalls[5]; break;
1746 }
1747 } else if (Libcalls[0] != RTLIB::UNKNOWN_LIBCALL) {
1748 RTLibType = Libcalls[0];
1749 } else {
1750 // Can't use sized function, and there's no generic for this
1751 // operation, so give up.
1752 return false;
1753 }
1755 if (!TLI->getLibcallName(RTLibType)) {
1756 // This target does not implement the requested atomic libcall so give up.
1757 return false;
1758 }
1760 // Build up the function call. There are two kinds. First, the sized
1761 // variants. These calls are going to be one of the following (with
1762 // N=1,2,4,8,16):
1763 // iN __atomic_load_N(iN *ptr, int ordering)
1764 // void __atomic_store_N(iN *ptr, iN val, int ordering)
1765 // iN __atomic_{exchange|fetch_*}_N(iN *ptr, iN val, int ordering)
1766 // bool __atomic_compare_exchange_N(iN *ptr, iN *expected, iN desired,
1767 // int success_order, int failure_order)
1769 // Note that these functions can be used for non-integer atomic
1770 // operations; the values just need to be bitcast to integers on the
1771 // way in and out.
1773 // And, then, the generic variants. They look like the following:
1774 // void __atomic_load(size_t size, void *ptr, void *ret, int ordering)
1775 // void __atomic_store(size_t size, void *ptr, void *val, int ordering)
1776 // void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
1777 // int ordering)
1778 // bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
1779 // void *desired, int success_order,
1780 // int failure_order)
1782 // The different signatures are built up depending on the
1783 // 'UseSizedLibcall', 'CASExpected', 'ValueOperand', and 'HasResult'
1784 // variables.
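// For example (a sketch of the two shapes for an i32 exchange; the *.addr
// names are illustrative stack temporaries created below):
//   sized:   i32 __atomic_exchange_4(i8* %ptr, i32 %val, i32 5)
//   generic: void __atomic_exchange(i64 4, i8* %ptr, i8* %val.addr,
//                                   i8* %ret.addr, i32 5)
// where 5 is the C ABI encoding of seq_cst produced by toCABI().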
1786 AllocaInst *AllocaCASExpected = nullptr;
1787 Value *AllocaCASExpected_i8 = nullptr;
1788 AllocaInst *AllocaValue = nullptr;
1789 Value *AllocaValue_i8 = nullptr;
1790 AllocaInst *AllocaResult = nullptr;
1791 Value *AllocaResult_i8 = nullptr;
1793 Type *ResultTy;
1794 SmallVector<Value *, 6> Args;
1795 AttributeList Attr;
1797 // 'size' argument.
1798 if (!UseSizedLibcall) {
1799 // Note, getIntPtrType is assumed equivalent to size_t.
1800 Args.push_back(ConstantInt::get(DL.getIntPtrType(Ctx), Size));
1801 }
1803 // 'ptr' argument.
1804 // note: This assumes all address spaces share a common libfunc
1805 // implementation and that addresses are convertible. For systems without
1806 // that property, we'd need to extend this mechanism to support AS-specific
1807 // families of atomic intrinsics.
1808 auto PtrTypeAS = PointerOperand->getType()->getPointerAddressSpace();
1809 Value *PtrVal = Builder.CreateBitCast(PointerOperand,
1810 Type::getInt8PtrTy(Ctx, PtrTypeAS));
1811 PtrVal = Builder.CreateAddrSpaceCast(PtrVal, Type::getInt8PtrTy(Ctx));
1812 Args.push_back(PtrVal);
1814 // 'expected' argument, if present.
1815 if (CASExpected) {
1816 AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
1817 AllocaCASExpected->setAlignment(AllocaAlignment);
1818 unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();
1820 AllocaCASExpected_i8 =
1821 Builder.CreateBitCast(AllocaCASExpected,
1822 Type::getInt8PtrTy(Ctx, AllocaAS));
1823 Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
1824 Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
1825 Args.push_back(AllocaCASExpected_i8);
1826 }
1828 // 'val' argument ('desired' for cas), if present.
1829 if (ValueOperand) {
1830 if (UseSizedLibcall) {
1831 Value *IntValue =
1832 Builder.CreateBitOrPointerCast(ValueOperand, SizedIntTy);
1833 Args.push_back(IntValue);
1834 } else {
1835 AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
1836 AllocaValue->setAlignment(AllocaAlignment);
1837 AllocaValue_i8 =
1838 Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
1839 Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
1840 Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
1841 Args.push_back(AllocaValue_i8);
1842 }
1843 }
1845 // 'ret' argument.
1846 if (!CASExpected && HasResult && !UseSizedLibcall) {
1847 AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
1848 AllocaResult->setAlignment(AllocaAlignment);
1849 unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
1850 AllocaResult_i8 =
1851 Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
1852 Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
1853 Args.push_back(AllocaResult_i8);
1854 }
1856 // 'ordering' ('success_order' for cas) argument.
1857 Args.push_back(OrderingVal);
1859 // 'failure_order' argument, if present.
1860 if (Ordering2Val)
1861 Args.push_back(Ordering2Val);
1863 // Now, the return type.
1864 if (CASExpected) {
1865 ResultTy = Type::getInt1Ty(Ctx);
1866 Attr = Attr.addRetAttribute(Ctx, Attribute::ZExt);
1867 } else if (HasResult && UseSizedLibcall)
1868 ResultTy = SizedIntTy;
1869 else
1870 ResultTy = Type::getVoidTy(Ctx);
1872 // Done with setting up arguments and return types, create the call:
1873 SmallVector<Type *, 6> ArgTys;
1874 for (Value *Arg : Args)
1875 ArgTys.push_back(Arg->getType());
1876 FunctionType *FnType = FunctionType::get(ResultTy, ArgTys, false);
1877 FunctionCallee LibcallFn =
1878 M->getOrInsertFunction(TLI->getLibcallName(RTLibType), FnType, Attr);
1879 CallInst *Call = Builder.CreateCall(LibcallFn, Args);
1880 Call->setAttributes(Attr);
1881 Value *Result = Call;
1883 // And then, extract the results...
1884 if (ValueOperand && !UseSizedLibcall)
1885 Builder.CreateLifetimeEnd(AllocaValue_i8, SizeVal64);
1887 if (CASExpected) {
1888 // The final result from the CAS is {load of 'expected' alloca, bool result
1889 // from call}
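// (Note: the __atomic_compare_exchange* functions store the value they
// observed in memory back through the 'expected' pointer when the compare
// fails, so the aligned load from AllocaCASExpected below always yields the
// loaded value needed for the { iN, i1 } result.)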
1890 Type *FinalResultTy = I->getType();
1891 Value *V = UndefValue::get(FinalResultTy);
1892 Value *ExpectedOut = Builder.CreateAlignedLoad(
1893 CASExpected->getType(), AllocaCASExpected, AllocaAlignment);
1894 Builder.CreateLifetimeEnd(AllocaCASExpected_i8, SizeVal64);
1895 V = Builder.CreateInsertValue(V, ExpectedOut, 0);
1896 V = Builder.CreateInsertValue(V, Result, 1);
1897 I->replaceAllUsesWith(V);
1898 } else if (HasResult) {
1899 Value *V;
1900 if (UseSizedLibcall)
1901 V = Builder.CreateBitOrPointerCast(Result, I->getType());
1902 else {
1903 V = Builder.CreateAlignedLoad(I->getType(), AllocaResult,
1904 AllocaAlignment);
1905 Builder.CreateLifetimeEnd(AllocaResult_i8, SizeVal64);
1906 }
1907 I->replaceAllUsesWith(V);
1908 }
1909 I->eraseFromParent();
1910 return true;
1911 }