//===- InstCombineAtomicRMW.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for atomic rmw instructions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;

namespace {
/// Return true if and only if the given instruction does not modify the memory
/// location referenced. Note that an idempotent atomicrmw may still have
/// ordering effects on nearby instructions, or be volatile.
/// TODO: Common w/ the version in AtomicExpandPass, and change the term used.
/// "Idempotent" is confusing in this context.
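///
/// Illustrative IR (a hedged sketch in typed-pointer syntax; the exact
/// spelling depends on the IR version in use):
///   atomicrmw or   i32*   %p, i32   0    monotonic  ; leaves memory unchanged
///   atomicrmw fadd float* %p, float -0.0 monotonic  ; leaves memory unchanged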
bool isIdempotentRMW(AtomicRMWInst& RMWI) {
  if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
    switch(RMWI.getOperation()) {
    case AtomicRMWInst::FAdd: // -0.0
      return CF->isZero() && CF->isNegative();
    case AtomicRMWInst::FSub: // +0.0
      return CF->isZero() && !CF->isNegative();
    default:
      return false;
    }

  auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
  if(!C)
    return false;
  switch(RMWI.getOperation()) {
    case AtomicRMWInst::Add:
    case AtomicRMWInst::Sub:
    case AtomicRMWInst::Or:
    case AtomicRMWInst::Xor:
      return C->isZero();
    case AtomicRMWInst::And:
      return C->isMinusOne();
    case AtomicRMWInst::Min:
      return C->isMaxValue(true);
    case AtomicRMWInst::Max:
      return C->isMinValue(true);
    case AtomicRMWInst::UMin:
      return C->isMaxValue(false);
    case AtomicRMWInst::UMax:
      return C->isMinValue(false);
    default:
      return false;
  }
}
/// Return true if the given instruction always produces a value in memory
/// equivalent to its value operand.
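///
/// Illustrative IR (a hedged sketch, typed-pointer syntax):
///   atomicrmw or  i32* %p, i32 -1 seq_cst  ; memory always ends up as -1
///   atomicrmw and i32* %p, i32  0 seq_cst  ; memory always ends up as 0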
bool isSaturating(AtomicRMWInst& RMWI) {
  if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
    switch(RMWI.getOperation()) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
      return CF->isNaN();
    default:
      return false;
    }

  auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
  if(!C)
    return false;
  switch(RMWI.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    return true;
  case AtomicRMWInst::Or:
    return C->isAllOnesValue();
  case AtomicRMWInst::And:
    return C->isZero();
  case AtomicRMWInst::Min:
    return C->isMinValue(true);
  case AtomicRMWInst::Max:
    return C->isMaxValue(true);
  case AtomicRMWInst::UMin:
    return C->isMinValue(false);
  case AtomicRMWInst::UMax:
    return C->isMaxValue(false);
  }
}
} // namespace
Instruction *InstCombinerImpl::visitAtomicRMWInst(AtomicRMWInst &RMWI) {

  // Volatile RMWs perform a load and a store; we cannot replace this with just
  // a load or just a store. We chose not to canonicalize out of general
  // paranoia about user expectations around volatile.
  if (RMWI.isVolatile())
    return nullptr;
  // Any atomicrmw op which produces a known result in memory can be
  // replaced w/an atomicrmw xchg.
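  // For example (illustrative, typed-pointer syntax; the old value returned
  // by the instruction is unchanged by the rewrite):
  //   %old = atomicrmw or i32* %p, i32 -1 seq_cst
  // can become
  //   %old = atomicrmw xchg i32* %p, i32 -1 seq_cst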
  if (isSaturating(RMWI) &&
      RMWI.getOperation() != AtomicRMWInst::Xchg) {
    RMWI.setOperation(AtomicRMWInst::Xchg);
    return &RMWI;
  }
  AtomicOrdering Ordering = RMWI.getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic &&
         Ordering != AtomicOrdering::Unordered &&
         "AtomicRMWs don't make sense with Unordered or NotAtomic");
  // Any atomicrmw xchg with no uses can be converted to an atomic store if the
  // ordering is compatible.
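  // For example (illustrative, assuming the xchg result is unused and i32 has
  // 4-byte ABI alignment):
  //   atomicrmw xchg i32* %p, i32 42 release
  // becomes
  //   store atomic i32 42, i32* %p release, align 4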
  if (RMWI.getOperation() == AtomicRMWInst::Xchg &&
      RMWI.use_empty()) {
    if (Ordering != AtomicOrdering::Release &&
        Ordering != AtomicOrdering::Monotonic)
      return nullptr;
    auto *SI = new StoreInst(RMWI.getValOperand(),
                             RMWI.getPointerOperand(), &RMWI);
    SI->setAtomic(Ordering, RMWI.getSyncScopeID());
    SI->setAlignment(DL.getABITypeAlign(RMWI.getType()));
    return eraseInstFromFunction(RMWI);
  }

  if (!isIdempotentRMW(RMWI))
    return nullptr;
  // We chose to canonicalize all idempotent operations to a single
  // operation code and constant. This makes it easier for the rest of the
  // optimizer to match. The choices of or w/0 and fadd w/-0.0 are
  // arbitrary.
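  // For example (illustrative, typed-pointer syntax):
  //   atomicrmw add i32* %p, i32 0 acquire
  // is canonicalized to
  //   atomicrmw or i32* %p, i32 0 acquire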
  if (RMWI.getType()->isIntegerTy() &&
      RMWI.getOperation() != AtomicRMWInst::Or) {
    RMWI.setOperation(AtomicRMWInst::Or);
    return replaceOperand(RMWI, 1, ConstantInt::get(RMWI.getType(), 0));
  } else if (RMWI.getType()->isFloatingPointTy() &&
             RMWI.getOperation() != AtomicRMWInst::FAdd) {
    RMWI.setOperation(AtomicRMWInst::FAdd);
    return replaceOperand(RMWI, 1, ConstantFP::getNegativeZero(RMWI.getType()));
  }
  // Check if the required ordering is compatible with an atomic load.
  if (Ordering != AtomicOrdering::Acquire &&
      Ordering != AtomicOrdering::Monotonic)
    return nullptr;
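  // At this point the RMW is idempotent and its ordering is load-compatible,
  // so (illustratively, assuming 4-byte ABI alignment for i32) something like
  //   %old = atomicrmw or i32* %p, i32 0 acquire
  // can be replaced with
  //   %old = load atomic i32, i32* %p acquire, align 4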
  LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand(), "",
                                false, DL.getABITypeAlign(RMWI.getType()),
                                Ordering, RMWI.getSyncScopeID());
  return Load;
}