//===- Block.cpp - MLIR Block Class ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/IR/Block.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Block
//===----------------------------------------------------------------------===//

Block::~Block() {
  assert(!verifyOpOrder() && "Expected valid operation ordering.");
  clear();
  for (BlockArgument arg : arguments)
    arg.destroy();
}

Region *Block::getParent() const { return parentValidOpOrderPair.getPointer(); }

/// Returns the closest surrounding operation that contains this block or
/// nullptr if this block is unlinked.
Operation *Block::getParentOp() {
  return getParent() ? getParent()->getParentOp() : nullptr;
}

/// Return if this block is the entry block in the parent region.
bool Block::isEntryBlock() { return this == &getParent()->front(); }

/// Insert this block (which must not already be in a region) right before the
/// specified block.
void Block::insertBefore(Block *block) {
  assert(!getParent() && "already inserted into a block!");
  assert(block->getParent() && "cannot insert before a block without a parent");
  block->getParent()->getBlocks().insert(block->getIterator(), this);
}

void Block::insertAfter(Block *block) {
  assert(!getParent() && "already inserted into a block!");
  assert(block->getParent() && "cannot insert after a block without a parent");
  block->getParent()->getBlocks().insertAfter(block->getIterator(), this);
}

/// Unlink this block from its current region and insert it right before the
/// specified block.
void Block::moveBefore(Block *block) {
  assert(block->getParent() && "cannot insert before a block without a parent");
  moveBefore(block->getParent(), block->getIterator());
}

/// Unlink this block from its current region and insert it right before the
/// block that the given iterator points to in the region `region`.
void Block::moveBefore(Region *region, llvm::iplist<Block>::iterator iterator) {
  region->getBlocks().splice(iterator, getParent()->getBlocks(), getIterator());
}
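
// Illustrative usage sketch (editorial addition, not part of the upstream
// file): moving a block so that it directly precedes another block in the
// same region. `latch` and `header` are hypothetical Block pointers.
//
//   Block *header = ...;
//   Block *latch = ...;
//   latch->moveBefore(header);
//   assert(latch->getParent() == header->getParent());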

/// Unlink this Block from its parent Region and delete it.
void Block::erase() {
  assert(getParent() && "Block has no parent");
  getParent()->getBlocks().erase(this);
}

/// Returns 'op' if 'op' lies in this block, or otherwise finds the
/// ancestor operation of 'op' that lies in this block. Returns nullptr if
/// the latter fails.
Operation *Block::findAncestorOpInBlock(Operation &op) {
  // Traverse up the operation hierarchy starting from 'op' to find the
  // ancestor operation that resides in this block.
  Operation *currOp = &op;
  while (currOp->getBlock() != this) {
    currOp = currOp->getParentOp();
    if (!currOp)
      return nullptr;
  }
  return currOp;
}
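
// Illustrative usage sketch (editorial addition): `block` and `nestedOp` are
// hypothetical; `nestedOp` may be nested arbitrarily deeply in regions owned
// by operations of `block`.
//
//   if (Operation *ancestor = block->findAncestorOpInBlock(*nestedOp)) {
//     // `ancestor` is the operation directly in `block` that (transitively)
//     // contains `nestedOp`, or `nestedOp` itself if it lives in `block`.
//   }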

/// This drops all operand uses from operations within this block, which is
/// an essential step in breaking cyclic dependences between references when
/// they are to be deleted.
void Block::dropAllReferences() {
  for (Operation &i : *this)
    i.dropAllReferences();
}

void Block::dropAllDefinedValueUses() {
  for (auto arg : getArguments())
    arg.dropAllUses();
  for (auto &op : *this)
    op.dropAllDefinedValueUses();
  dropAllUses();
}
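
// Illustrative usage sketch (editorial addition): a typical teardown of a
// region whose blocks may reference each other cyclically first severs all
// references and then deletes the blocks. `region` is a hypothetical Region
// whose values have no users outside of it.
//
//   for (Block &b : region)
//     b.dropAllReferences();
//   region.getBlocks().clear();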

/// Returns true if the ordering of the child operations is valid, false
/// otherwise.
bool Block::isOpOrderValid() { return parentValidOpOrderPair.getInt(); }

/// Invalidates the current ordering of operations.
void Block::invalidateOpOrder() {
  // Validate the current ordering.
  assert(!verifyOpOrder());
  parentValidOpOrderPair.setInt(false);
}

/// Verifies the current ordering of child operations. Returns false if the
/// order is valid, true otherwise.
bool Block::verifyOpOrder() {
  // The order is already known to be invalid.
  if (!isOpOrderValid())
    return false;
  // The order is valid if there are fewer than two operations.
  if (operations.empty() || std::next(operations.begin()) == operations.end())
    return false;

  Operation *prev = nullptr;
  for (auto &i : *this) {
    // The previous operation must have a smaller order index than the next as
    // it appears earlier in the list.
    if (prev && prev->orderIndex != Operation::kInvalidOrderIdx &&
        prev->orderIndex >= i.orderIndex)
      return true;
    prev = &i;
  }
  return false;
}

/// Recomputes the ordering of child operations within the block.
void Block::recomputeOpOrder() {
  parentValidOpOrderPair.setInt(true);

  unsigned orderIndex = 0;
  for (auto &op : *this)
    op.orderIndex = (orderIndex += Operation::kOrderStride);
}
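
// Editorial note (not part of the upstream file): the order indices
// maintained above let block-local ordering queries avoid walking the
// operation list. Assuming two operations `a` and `b` in the same block, a
// query such as
//
//   bool aFirst = a->isBeforeInBlock(b);
//
// can compare the cached order indices, falling back to recomputeOpOrder()
// only after insertions or moves have invalidated the ordering.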

//===----------------------------------------------------------------------===//
// Argument list management.
//===----------------------------------------------------------------------===//

/// Return a range containing the types of the arguments for this block.
auto Block::getArgumentTypes() -> ValueTypeRange<BlockArgListType> {
  return ValueTypeRange<BlockArgListType>(getArguments());
}

BlockArgument Block::addArgument(Type type, Location loc) {
  BlockArgument arg = BlockArgument::create(type, this, arguments.size(), loc);
  arguments.push_back(arg);
  return arg;
}

/// Add one argument to the argument list for each type specified in the list.
auto Block::addArguments(TypeRange types, ArrayRef<Location> locs)
    -> iterator_range<args_iterator> {
  assert(types.size() == locs.size() &&
         "incorrect number of block argument locations");
  size_t initialSize = arguments.size();
  arguments.reserve(initialSize + types.size());

  for (auto typeAndLoc : llvm::zip(types, locs))
    addArgument(std::get<0>(typeAndLoc), std::get<1>(typeAndLoc));
  return {arguments.data() + initialSize, arguments.data() + arguments.size()};
}
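
// Illustrative usage sketch (editorial addition): appending two arguments to
// a hypothetical block `block`, using hypothetical types `i32Ty` and `f32Ty`
// and a shared location `loc`.
//
//   SmallVector<Type> argTypes = {i32Ty, f32Ty};
//   SmallVector<Location> argLocs(argTypes.size(), loc);
//   auto newArgs = block->addArguments(argTypes, argLocs);
//   for (BlockArgument arg : newArgs)
//     (void)arg.getArgNumber(); // Positions reflect the block's argument list.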

BlockArgument Block::insertArgument(unsigned index, Type type, Location loc) {
  assert(index <= arguments.size() && "invalid insertion index");

  auto arg = BlockArgument::create(type, this, index, loc);
  arguments.insert(arguments.begin() + index, arg);
  // Update the cached position for all the arguments after the newly inserted
  // one.
  ++index;
  for (BlockArgument arg : llvm::drop_begin(arguments, index))
    arg.setArgNumber(index++);
  return arg;
}

/// Insert one value at the given position of the argument list. The existing
/// arguments are shifted. The block is expected not to have predecessors.
BlockArgument Block::insertArgument(args_iterator it, Type type, Location loc) {
  assert(getPredecessors().empty() &&
         "cannot insert arguments to blocks with predecessors");
  return insertArgument(it->getArgNumber(), type, loc);
}

void Block::eraseArgument(unsigned index) {
  assert(index < arguments.size());
  arguments[index].destroy();
  arguments.erase(arguments.begin() + index);
  for (BlockArgument arg : llvm::drop_begin(arguments, index))
    arg.setArgNumber(index++);
}

void Block::eraseArguments(unsigned start, unsigned num) {
  assert(start + num <= arguments.size());
  for (unsigned i = 0; i < num; ++i)
    arguments[start + i].destroy();
  arguments.erase(arguments.begin() + start, arguments.begin() + start + num);
  for (BlockArgument arg : llvm::drop_begin(arguments, start))
    arg.setArgNumber(start++);
}

void Block::eraseArguments(const BitVector &eraseIndices) {
  eraseArguments(
      [&](BlockArgument arg) { return eraseIndices.test(arg.getArgNumber()); });
}

void Block::eraseArguments(function_ref<bool(BlockArgument)> shouldEraseFn) {
  auto firstDead = llvm::find_if(arguments, shouldEraseFn);
  if (firstDead == arguments.end())
    return;

  // Destroy the first dead argument; this avoids reapplying the predicate to
  // it.
  unsigned index = firstDead->getArgNumber();
  firstDead->destroy();

  // Iterate the remaining arguments to remove any that are now dead.
  for (auto it = std::next(firstDead), e = arguments.end(); it != e; ++it) {
    // Destroy dead arguments, and shift those that are still live.
    if (shouldEraseFn(*it)) {
      it->destroy();
    } else {
      it->setArgNumber(index++);
      *firstDead++ = *it;
    }
  }
  arguments.erase(firstDead, arguments.end());
}
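
// Illustrative usage sketch (editorial addition): erasing every argument of a
// hypothetical block `block` that no longer has uses. This assumes the block
// has no predecessors whose branch operands would need to be updated as well.
//
//   block->eraseArguments(
//       [](BlockArgument arg) { return arg.use_empty(); });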

//===----------------------------------------------------------------------===//
// Terminator management
//===----------------------------------------------------------------------===//

/// Get the terminator operation of this block. This function asserts that
/// the block might have a valid terminator operation.
Operation *Block::getTerminator() {
  assert(mightHaveTerminator());
  return &back();
}

/// Check whether this block might have a terminator.
bool Block::mightHaveTerminator() {
  return !empty() && back().mightHaveTrait<OpTrait::IsTerminator>();
}

// Indexed successor access.
unsigned Block::getNumSuccessors() {
  return empty() ? 0 : back().getNumSuccessors();
}

Block *Block::getSuccessor(unsigned i) {
  assert(i < getNumSuccessors());
  return getTerminator()->getSuccessor(i);
}

/// If this block has exactly one predecessor, return it. Otherwise, return
/// null.
///
/// Note that multiple edges from a single block (e.g. if you have a cond
/// branch with the same block as the true/false destinations) are not
/// considered to be a single predecessor.
Block *Block::getSinglePredecessor() {
  auto it = pred_begin();
  if (it == pred_end())
    return nullptr;
  auto *firstPred = *it;
  ++it;
  return it == pred_end() ? firstPred : nullptr;
}

/// If this block has a unique predecessor, i.e., all incoming edges originate
/// from one block, return it. Otherwise, return null.
Block *Block::getUniquePredecessor() {
  auto it = pred_begin(), e = pred_end();
  if (it == e)
    return nullptr;

  // Check for any conflicting predecessors.
  auto *firstPred = *it;
  for (++it; it != e; ++it)
    if (*it != firstPred)
      return nullptr;
  return firstPred;
}
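
// Editorial example (not part of the upstream file) contrasting the two
// queries above for a hypothetical block `b`: if a conditional branch in a
// single predecessor targets `b` on both its true and false edges, then
// getSinglePredecessor() returns nullptr (two edges), while
// getUniquePredecessor() returns that predecessor (one distinct block).
//
//   Block *single = b->getSinglePredecessor(); // null if more than one edge
//   Block *unique = b->getUniquePredecessor(); // null if more than one block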

//===----------------------------------------------------------------------===//
// Other
//===----------------------------------------------------------------------===//

/// Split the block into two blocks before the specified operation or
/// iterator.
///
/// Note that all operations BEFORE the specified iterator stay as part of
/// the original basic block, and the rest of the operations in the original
/// block are moved to the new block, including the old terminator. The
/// original block is left without a terminator.
///
/// The newly formed Block is returned, and the specified iterator is
/// invalidated.
Block *Block::splitBlock(iterator splitBefore) {
  // Start by creating a new basic block, and insert it immediately after this
  // one in the containing region.
  auto *newBB = new Block();
  getParent()->getBlocks().insert(std::next(Region::iterator(this)), newBB);

  // Move all of the operations from the split point to the end of the region
  // into the new block.
  newBB->getOperations().splice(newBB->end(), getOperations(), splitBefore,
                                end());
  return newBB;
}
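
// Illustrative usage sketch (editorial addition): splitting a hypothetical
// block `block` right before one of its operations `op`.
//
//   Block *tail = block->splitBlock(op->getIterator());
//   // `op` and everything after it now live in `tail`; `block` has lost its
//   // terminator and needs a new one, e.g. an unconditional branch to `tail`.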

//===----------------------------------------------------------------------===//
// Predecessors
//===----------------------------------------------------------------------===//

Block *PredecessorIterator::unwrap(BlockOperand &value) {
  return value.getOwner()->getBlock();
}

/// Get the successor number in the predecessor terminator.
unsigned PredecessorIterator::getSuccessorIndex() const {
  return I->getOperandNumber();
}

//===----------------------------------------------------------------------===//
// SuccessorRange
//===----------------------------------------------------------------------===//

SuccessorRange::SuccessorRange() : SuccessorRange(nullptr, 0) {}

SuccessorRange::SuccessorRange(Block *block) : SuccessorRange() {
  if (block->empty() || llvm::hasSingleElement(*block->getParent()))
    return;
  Operation *term = &block->back();
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}

SuccessorRange::SuccessorRange(Operation *term) : SuccessorRange() {
  if ((count = term->getNumSuccessors()))
    base = term->getBlockOperands().data();
}
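
// Illustrative usage sketch (editorial addition): iterating the successors of
// a hypothetical block `b` via the range returned by Block::getSuccessors(),
// which is backed by the constructors above.
//
//   for (Block *succ : b->getSuccessors())
//     (void)succ;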

bool Block::isReachable(Block *other, SmallPtrSet<Block *, 16> &&except) {
  assert(getParent() == other->getParent() && "expected same region");
  if (except.contains(other)) {
    // Fast path: If `other` is in the `except` set, there can be no path from
    // "this" to `other` (that does not pass through an excluded block).
    return false;
  }

  SmallVector<Block *> worklist(succ_begin(), succ_end());
  while (!worklist.empty()) {
    Block *next = worklist.pop_back_val();
    if (next == other)
      return true;

    // Note: `except` keeps track of already visited blocks.
    if (!except.insert(next).second)
      continue;

    worklist.append(next->succ_begin(), next->succ_end());
  }
  return false;
}
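
// Illustrative usage sketch (editorial addition): checking whether control
// flow can reach `body` from `header` without passing through `exit`. All
// three blocks are hypothetical and belong to the same region.
//
//   SmallPtrSet<Block *, 16> except;
//   except.insert(exit);
//   bool reachable = header->isReachable(body, std::move(except));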

//===----------------------------------------------------------------------===//
// BlockRange
//===----------------------------------------------------------------------===//

BlockRange::BlockRange(ArrayRef<Block *> blocks) : BlockRange(nullptr, 0) {
  if ((count = blocks.size()))
    base = blocks.data();
}

BlockRange::BlockRange(SuccessorRange successors)
    : BlockRange(successors.begin().getBase(), successors.size()) {}

/// See `llvm::detail::indexed_accessor_range_base` for details.
BlockRange::OwnerT BlockRange::offset_base(OwnerT object, ptrdiff_t index) {
  if (auto *operand = llvm::dyn_cast_if_present<BlockOperand *>(object))
    return {operand + index};
  return {llvm::dyn_cast_if_present<Block *const *>(object) + index};
}

/// See `llvm::detail::indexed_accessor_range_base` for details.
Block *BlockRange::dereference_iterator(OwnerT object, ptrdiff_t index) {
  if (const auto *operand = llvm::dyn_cast_if_present<BlockOperand *>(object))
    return operand[index].get();
  return llvm::dyn_cast_if_present<Block *const *>(object)[index];
}