1//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 7//===----------------------------------------------------------------===// 9// This file implements the MemorySSAUpdater class. 11//===----------------------------------------------------------------===// 24#define DEBUG_TYPE "memoryssa" 27// This is the marker algorithm from "Simple and Efficient Construction of 28// Static Single Assignment Form" 29// The simple, non-marker algorithm places phi nodes at any join 30// Here, we place markers, and only place phi nodes if they end up necessary. 31// They are only necessary if they break a cycle (IE we recursively visit 32// ourselves again), or we discover, while getting the value of the operands, 33// that there are two or more definitions needing to be merged. 34// This still will leave non-minimal form in the case of irreducible control 35// flow, where phi nodes may be in cycles with themselves, but unnecessary. 39// First, do a cache lookup. Without this cache, certain CFG structures 40// (like a series of if statements) take exponential time to visit. 41auto Cached = CachedPreviousDef.find(BB);
42if (Cached != CachedPreviousDef.end())
45// If this method is called from an unreachable block, return LoE. 50 VisitedBlocks.insert(BB);
51// Single predecessor case, just recurse, we can only have one definition. 52MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
53 CachedPreviousDef.insert({BB, Result});
57if (VisitedBlocks.count(BB)) {
58// We hit our node again, meaning we had a cycle, we must insert a phi 59// node to break it so we have an operand. The only case this will 60// insert useless phis is if we have irreducible control flow. 62 CachedPreviousDef.insert({BB, Result});
66if (VisitedBlocks.insert(BB).second) {
67// Mark us visited so we can detect a cycle 70// Recurse to get the values in our predecessors for placement of a 71// potential phi node. This will insert phi nodes if we cycle in order to 72// break the cycle and have an operand. 73bool UniqueIncomingAccess =
true;
77auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
79 SingleAccess = IncomingAccess;
80elseif (IncomingAccess != SingleAccess)
81 UniqueIncomingAccess =
false;
87// Now try to simplify the ops to avoid placing a phi. 88// This may return null if we never created a phi yet, that's okay 91// See if we can avoid the phi by simplifying it. 92auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
93// If we couldn't simplify, we may have to create a phi 94if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
95// A concrete Phi only exists if we created an empty one to break a cycle. 97assert(Phi->operands().empty() &&
"Expected empty Phi");
98 Phi->replaceAllUsesWith(SingleAccess);
101 Result = SingleAccess;
102 }
elseif (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
104 Phi = MSSA->createMemoryPhi(BB);
106// See if the existing phi operands match what we need. 107// Unlike normal SSA, we only allow one phi node per block, so we can't just 109if (Phi->getNumOperands() != 0) {
110// FIXME: Figure out whether this is dead code and if so remove it. 111if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.
begin())) {
112// These will have been filled in by the recursive read we did above. 119 Phi->addIncoming(&*PhiOps[i++], Pred);
120 InsertedPHIs.push_back(Phi);
125// Set ourselves up for the next variable by resetting visited state. 126 VisitedBlocks.erase(BB);
127 CachedPreviousDef.insert({BB, Result});
133// This starts at the memory access, and goes backwards in the block to find the 134// previous definition. If a definition is not found the block of the access, 135// it continues globally, creating phi nodes to ensure we have a single 138if (
auto *LocalResult = getPreviousDefInBlock(MA))
141return getPreviousDefRecursive(MA->
getBlock(), CachedPreviousDef);
144// This starts at the memory access, and goes backwards in the block to the find 145// the previous definition. If the definition is not found in the block of the 146// access, it returns nullptr. 150// It's possible there are no defs, or we got handed the first def to start. 152// If this is a def, we can just use the def iterators. 153if (!isa<MemoryUse>(MA)) {
156if (Iter != Defs->rend())
159// Otherwise, have to walk the all access iterator. 162if (!isa<MemoryUse>(U))
163return cast<MemoryAccess>(&U);
164// Note that if MA comes before Defs->begin(), we won't hit a def. 171// This starts at the end of block 179return &*Defs->rbegin();
182return getPreviousDefRecursive(BB, CachedPreviousDef);
184// Recurse over a set of phi uses to eliminate the trivial ones 190 std::copy(
Phi->user_begin(),
Phi->user_end(), std::back_inserter(
Uses));
192if (
MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
193 tryRemoveTrivialPhi(UsePhi);
197// Eliminate trivial phis 198// Phis are trivial if they are defined either by themselves, or all the same 200// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c) 201// We recursively try to remove them. 203assert(Phi &&
"Can only remove concrete Phi.");
204auto OperRange =
Phi->operands();
205return tryRemoveTrivialPhi(Phi, OperRange);
207template <
class RangeType>
210// Bail out on non-opt Phis. 211if (NonOptPhis.count(Phi))
214// Detect equal or self arguments 217// If the same or self, good so far 220// not the same, return the phi since it's not eliminatable by us 223Same = cast<MemoryAccess>(&*
Op);
225// Never found a non-self reference, the phi is undef 233// We should only end up recursing in case we replaced something, in which 234// case, we may have made other Phis trivial. 235return recursePhi(
Same);
239 VisitedBlocks.clear();
240 InsertedPHIs.clear();
243// In cases without unreachable blocks, because uses do not create new 244// may-defs, there are only two cases: 245// 1. There was a def already below us, and therefore, we should not have 246// created a phi node because it was already needed for the def. 248// 2. There is no def below us, and therefore, there is no extra renaming work 251// In cases with unreachable blocks, where the unnecessary Phis were 252// optimized out, adding the Use may re-insert those Phis. Hence, when 253// inserting Uses outside of the MSSA creation process, and new Phis were 254// added, rename all uses if we are asked. 256if (!RenameUses && !InsertedPHIs.empty()) {
259assert((!Defs || (++Defs->begin() == Defs->end())) &&
260"Block may have only a Phi or no defs");
263if (RenameUses && InsertedPHIs.size()) {
269// Convert to incoming value if it's a memorydef. A phi *is* already an 271if (
auto *MD = dyn_cast<MemoryDef>(FirstDef))
272 FirstDef = MD->getDefiningAccess();
276// We just inserted a phi into this block, so the incoming value will 277// become the phi anyway, so it does not matter what we pass. 278for (
auto &MP : InsertedPHIs)
279if (
MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
280 MSSA->
renamePass(Phi->getBlock(),
nullptr, Visited);
284// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef. 287// Replace any operand with us an incoming block with the new defining 290assert(i != -1 &&
"Should have found the basic block in the phi");
291// We can't just compare i against getNumOperands since one is signed and the 292// other not. So use it to index into the block iterator. 301// A brief description of the algorithm: 302// First, we compute what should define the new def, using the SSA 303// construction algorithm. 304// Then, we update the defs below us (and any new phi nodes) in the graph to 305// point to the correct new defs, to ensure we only have one variable, and no 306// disconnected stores. 308// Don't bother updating dead code. 314 VisitedBlocks.clear();
315 InsertedPHIs.clear();
317// See if we had a local def, and if not, go hunting. 319bool DefBeforeSameBlock =
false;
321 !(isa<MemoryPhi>(DefBefore) &&
323 DefBeforeSameBlock =
true;
325// There is a def before us, which means we can replace any store/phi uses 326// of that thing with us, since we are in the way of whatever was there 328// We now define that def's memorydefs and memoryphis 329if (DefBeforeSameBlock) {
331// Leave the MemoryUses alone. 332// Also make sure we skip ourselves to avoid self references. 333User *Usr = U.getUser();
334return !isa<MemoryUse>(Usr) && Usr != MD;
335// Defs are automatically unoptimized when the user is set to MD below, 336// because the isOptimized() call will fail to find the same ID. 340// and that def is now our defining access. 347// Remember the index where we may insert new phis. 348unsigned NewPhiIndex = InsertedPHIs.size();
349if (!DefBeforeSameBlock) {
350// If there was a local def before us, we must have the same effect it 351// did. Because every may-def is the same, any phis/etc we would create, it 352// would also have created. If there was no local def before us, we 353// performed a global update, and have to search all successors and make 354// sure we update the first def in each of them (following all paths until 355// we hit the first def along each path). This may also insert phi nodes. 356// TODO: There are other cases we can skip this work, such as when we have a 357// single successor, and only used a straight line of single pred blocks 358// backwards to find the def. To make that work, we'd have to track whether 359// getDefRecursive only ever used the single predecessor case. These types 360// of paths also only exist in between CFG simplifications. 362// If this is the first def in the block and this insert is in an arbitrary 363// place, compute IDF and place phis. 366// If this is the last Def in the block, we may need additional Phis. 367// Compute IDF in all cases, as renaming needs to be done even when MD is 368// not the last access, because it can introduce a new access past which a 369// previous access was optimized; that access needs to be reoptimized. 371for (
constauto &VH : InsertedPHIs)
372if (
constauto *RealPHI = cast_or_null<MemoryPhi>(VH))
373 DefiningBlocks.
insert(RealPHI->getBlock());
379for (
auto *BBIDF : IDFBlocks) {
382 MPhi = MSSA->createMemoryPhi(BBIDF);
385 ExistingPhis.
insert(MPhi);
387// Add the phis created into the IDF blocks to NonOptPhis, so they are not 388// optimized out as trivial by the call to getPreviousDefFromEnd below. 389// Once they are complete, all these Phis are added to the FixupList, and 390// removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may 391// need fixing as well, and potentially be trivial before this insertion, 392// hence add all IDF Phis. See PR43044. 393 NonOptPhis.insert(MPhi);
395for (
auto &MPhi : NewInsertedPHIs) {
396auto *BBIDF = MPhi->getBlock();
399 MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
403// Re-take the index where we're adding the new phis, because the above call 404// to getPreviousDefFromEnd, may have inserted into InsertedPHIs. 405 NewPhiIndex = InsertedPHIs.size();
406for (
auto &MPhi : NewInsertedPHIs) {
407 InsertedPHIs.push_back(&*MPhi);
414// Remember the index where we stopped inserting new phis above, since the 415// fixupDefs call in the loop below may insert more, that are already minimal. 416unsigned NewPhiIndexEnd = InsertedPHIs.size();
418while (!FixupList.
empty()) {
419unsigned StartingPHISize = InsertedPHIs.size();
420 fixupDefs(FixupList);
422// Put any new phis on the fixup list, and process them 423 FixupList.
append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
426// Optimize potentially non-minimal phis added in this method. 427unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
429 tryRemoveTrivialPhis(
ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
431// Now that all fixups are done, rename all uses if we are asked. The defs are 432// guaranteed to be in reachable code due to the check at the method entry. 436// We are guaranteed there is a def in the block, because we just got it 437// handed to us in this function. 439// Convert to incoming value if it's a memorydef. A phi *is* already an 441if (
auto *MD = dyn_cast<MemoryDef>(FirstDef))
445// We just inserted a phi into this block, so the incoming value will become 446// the phi anyway, so it does not matter what we pass. 447for (
auto &MP : InsertedPHIs) {
448MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
450 MSSA->
renamePass(Phi->getBlock(),
nullptr, Visited);
452// Existing Phi blocks may need renaming too, if an access was previously 453// optimized and the inserted Defs "covers" the Optimized value. 454for (
constauto &MP : ExistingPhis) {
455MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
457 MSSA->
renamePass(Phi->getBlock(),
nullptr, Visited);
465for (
constauto &Var : Vars) {
466MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
469// First, see if there is a local def after the operand. 473// The temporary Phi is being fixed, unmark it for not to optimize. 474if (
MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
475 NonOptPhis.erase(Phi);
477// If there is a local def after us, we only have to rename that. 478if (++DefIter != Defs->end()) {
479 cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
483// Otherwise, we need to search down through the CFG. 484// For each of our successors, handle it directly if their is a phi, or 485// place on the fixup worklist. 493while (!Worklist.
empty()) {
496// Get the first def in the block that isn't a phi node. 498auto *FirstDef = &*Defs->begin();
499// The loop above and below should have taken care of phi nodes 500assert(!isa<MemoryPhi>(FirstDef) &&
501"Should have already handled phi nodes!");
502// We are now this def's defining access, make sure we actually dominate 505"Should have dominated the new access");
507// This may insert new phi nodes, because we are not guaranteed the 508// block we are processing has a single pred, and depending where the 509// store was inserted, it may require phi nodes below it. 510 cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
513// We didn't find a def, so we must continue. 515// If there is a phi node, handle it. 516// Otherwise, put the block on the worklist 520// If we cycle, we should have ended up at a phi node that we already 521// processed. FIXME: Double check this 522if (!Seen.
insert(S).second)
533 MPhi->unorderedDeleteIncomingBlock(
From);
534 tryRemoveTrivialPhi(MPhi);
550 tryRemoveTrivialPhi(MPhi);
554/// If all arguments of a MemoryPHI are defined by the same incoming 555/// argument, return that argument. 561 MA = cast<MemoryAccess>(Arg);
572if (
MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
576// If the MemoryDef is not part of the cloned region, leave it alone. 578assert(DefMUDI &&
"Found MemoryUseOrDef with no Instruction.");
579if (!IsInClonedRegion(DefMUDI->
getParent()))
582auto *NewDefMUDI = cast_or_null<Instruction>(VMap.
lookup(DefMUDI));
583 InsnDefining = NewDefMUDI ? MSSA->
getMemoryAccess(NewDefMUDI) :
nullptr;
584if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
585// The clone was simplified, it's no longer a MemoryDef, look up. 587 DefMUD->getDefiningAccess(), VMap, MPhiMap, MSSA, IsInClonedRegion);
590MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
592 InsnDefining = NewDefPhi;
594assert(InsnDefining &&
"Defining instruction cannot be nullptr.");
598void MemorySSAUpdater::cloneUsesAndDefs(
601bool CloneWasSimplified) {
608// Entry does not exist if the clone of the block did not clone all 609// instructions. This occurs in LoopRotate when cloning instructions 610// from the old header to the old preheader. The cloned instruction may 611// also be a simplified Value, not an Instruction (see LoopRotate). 612// Also in LoopRotate, even when it's an instruction, due to it being 613// simplified, it may be a Use rather than a Def, so we cannot use MUD as 614// template. Calls coming from updateForClonedBlockIntoPred, ensure this. 616 dyn_cast_or_null<Instruction>(VMap.
lookup(
Insn))) {
620 MPhiMap, MSSA, IsInClonedRegion),
621/*Template=*/CloneWasSimplified ?
nullptr : MUD,
622/*CreationMustSucceed=*/false);
636// Create phi node in the backedge block and populate it with the same 637// incoming values as MPhi. Skip incoming values coming from Preheader. 638auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
639bool HasUniqueIncomingValue =
true;
641for (
unsignedI = 0, E = MPhi->getNumIncomingValues();
I != E; ++
I) {
644if (IBB != Preheader) {
645 NewMPhi->addIncoming(
IV, IBB);
646if (HasUniqueIncomingValue) {
649elseif (UniqueValue !=
IV)
650 HasUniqueIncomingValue =
false;
655// Update incoming edges into MPhi. Remove all but the incoming edge from 656// Preheader. Add an edge from NewMPhi 657auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
658 MPhi->setIncomingValue(0, AccFromPreheader);
659 MPhi->setIncomingBlock(0, Preheader);
660for (
unsignedI = MPhi->getNumIncomingValues() - 1;
I >= 1; --
I)
661 MPhi->unorderedDeleteIncoming(
I);
662 MPhi->addIncoming(NewMPhi, BEBlock);
664// If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be 665// replaced with the unique value. 666 tryRemoveTrivialPhi(NewMPhi);
672bool IgnoreIncomingWithNoClones) {
674for (
BasicBlock *BB : concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
681assert(Phi && NewPhi &&
"Invalid Phi nodes.");
685for (
unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
686MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
691elseif (IgnoreIncomingWithNoClones)
694// Now we have IncBB, and will need to add incoming from it to NewPhi. 696// If IncBB is not a predecessor of NewPhiBB, then do not add it. 697// NewPhiBB was cloned without that edge. 698if (!NewPhiBBPreds.
count(IncBB))
701// Determine incoming value and add it as incoming from IncBB. 708 MPhiMap[Phi] = SingleAccess;
719"Cloned block should have no accesses");
723MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
724 MPhiMap[MPhi] = NewPhi;
726// Update Uses and Defs. 727 cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap, IsInClonedRegion);
736 FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
741// All defs/phis from outside BB that are used in BB, are valid uses in P1. 742// Since those defs/phis must have dominated BB, and also dominate P1. 743// Defs from BB being used in BB will be replaced with the cloned defs from 744// VM. The uses of BB's Phi (if it exists) in BB will be replaced by the 745// incoming def into the Phi from P1. 746// Instructions cloned into the predecessor are in practice sometimes 747// simplified, so disable the use of the template, and create an access from 751 MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
753 BB, P1, VM, MPhiMap, [&](
BasicBlock *CheckBB) {
return BB == CheckBB; },
754/*CloneWasSimplified=*/true);
757template <
typename Iter>
758void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
762// Update/insert phis in all successors of exit blocks. 763for (
auto *Exit : ExitBlocks)
776 privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
783auto GetPtr = [&](
const std::unique_ptr<ValueToValueMapTy> &
I) {
786usingMappedIteratorType =
789auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
790auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
791 privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
799for (
constauto &Update : Updates) {
800if (Update.getKind() == DT.
Insert)
801 InsertUpdates.
push_back({DT.
Insert, Update.getFrom(), Update.getTo()});
803 DeleteUpdates.
push_back({DT.
Delete, Update.getFrom(), Update.getTo()});
804 RevDeleteUpdates.
push_back({DT.
Insert, Update.getFrom(), Update.getTo()});
808if (!DeleteUpdates.
empty()) {
809if (!InsertUpdates.
empty()) {
812// Deletes are reversed applied, because this CFGView is pretending the 813// deletes did not happen yet, hence the edges still exist. 816// Apply all updates, with the RevDeleteUpdates as PostCFGView. 820// Note: the MSSA update below doesn't distinguish between a GD with 821// (RevDelete,false) and (Delete, true), but this matters for the DT 822// updates above; for "children" purposes they are equivalent; but the 823// updates themselves convey the desired update, used inside DT only. 826// Update DT to redelete edges; this matches the real CFG so we can 827// perform the standard update without a postview of the CFG. 840// Update for deleted edges 841for (
auto &Update : DeleteUpdates)
854// Get recursive last Def, assuming well formed MSSA and updated DT. 858// Return last Def or Phi in BB, if it exists. 860return &*(--Defs->
end());
862// Check number of predecessors, we only care if there's more than one. 865for (
auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
872// If BB has multiple predecessors, get last definition from IDom. 874// [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its 875// DT is invalidated. Return LoE as its last def. This will be added to 876// MemoryPhi node, and later deleted when the block is deleted. 880if (IDom->getBlock() != BB) {
881 BB = IDom->getBlock();
886// Single predecessor, BB cannot be dead. GetLastDef of Pred. 887assert(Count == 1 && Pred &&
"Single predecessor expected.");
888// BB can be unreachable though, return LoE if that is the case. 897// Get nearest IDom given a set of blocks. 898// TODO: this can be optimized by starting the search at the node with the 899// lowest level (highest in the tree). 900auto FindNearestCommonDominator =
903for (
auto *BB : BBSet)
908// Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not 910auto GetNoLongerDomBlocks =
913if (PrevIDom == CurrIDom)
915 BlocksPrevDom.push_back(PrevIDom);
919if (UpIDom == CurrIDom)
921 BlocksPrevDom.push_back(UpIDom);
926// Map a BB to its predecessors: added + previously existing. To get a 927// deterministic order, store predecessors as SetVectors. The order in each 928// will be defined by the order in Updates (fixed) and the order given by 929// children<> (also fixed). Since we further iterate over these ordered sets, 930// we lose the information of multiple edges possibly existing between two 931// blocks, so we'll keep and EdgeCount map for that. 932// An alternate implementation could keep unordered set for the predecessors, 933// traverse either Updates or children<> each time to get the deterministic 934// order, and drop the usage of EdgeCount. This alternate approach would still 935// require querying the maps for each predecessor, and children<> call has 936// additional computation inside for creating the snapshot-graph predecessors. 937// As such, we favor using a little additional storage and less compute time. 938// This decision can be revisited if we find the alternative more favorable. 946for (
constauto &Edge : Updates) {
948auto &AddedBlockSet = PredMap[BB].Added;
949 AddedBlockSet.
insert(Edge.getFrom());
952// Store all existing predecessor for each BB, at least one must exist. 955for (
auto &BBPredPair : PredMap) {
956auto *BB = BBPredPair.first;
957constauto &AddedBlockSet = BBPredPair.second.Added;
958auto &PrevBlockSet = BBPredPair.second.Prev;
959for (
auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
960if (!AddedBlockSet.count(Pi))
961 PrevBlockSet.insert(Pi);
962 EdgeCountMap[{Pi, BB}]++;
965if (PrevBlockSet.empty()) {
966assert(
pred_size(BB) == AddedBlockSet.size() &&
"Duplicate edges added.");
969 <<
"Adding a predecessor to a block with no predecessors. " 970"This must be an edge added to a new, likely cloned, block. " 971"Its memory accesses must be already correct, assuming completed " 972"via the updateExitBlocksForClonedLoop API. " 973"Assert a single such edge is added so no phi addition or " 974"additional processing is required.\n");
975assert(AddedBlockSet.size() == 1 &&
976"Can only handle adding one predecessor to a new block.");
977// Need to remove new blocks from PredMap. Remove below to not invalidate 982// Nothing to process for new/cloned blocks. 983for (
auto *BB : NewBlocks)
989// First create MemoryPhis in all blocks that don't have one. Create in the 990// order found in Updates, not in PredMap, to get deterministic numbering. 991for (
constauto &Edge : Updates) {
994 InsertedPhis.
push_back(MSSA->createMemoryPhi(BB));
997// Now we'll fill in the MemoryPhis with the right incoming values. 998for (
auto &BBPredPair : PredMap) {
999auto *BB = BBPredPair.first;
1000constauto &PrevBlockSet = BBPredPair.second.Prev;
1001constauto &AddedBlockSet = BBPredPair.second.Added;
1002assert(!PrevBlockSet.empty() &&
1003"At least one previous predecessor must exist.");
1005// TODO: if this becomes a bottleneck, we can save on GetLastDef calls by 1006// keeping this map before the loop. We can reuse already populated entries 1007// if an edge is added from the same predecessor to two different blocks, 1008// and this does happen in rotate. Note that the map needs to be updated 1009// when deleting non-necessary phis below, if the phi is in the map by 1010// replacing the value with DefP1. 1012for (
auto *AddedPred : AddedBlockSet) {
1013auto *DefPn = GetLastDef(AddedPred);
1014assert(DefPn !=
nullptr &&
"Unable to find last definition.");
1015 LastDefAddedPred[AddedPred] = DefPn;
1019// If Phi is not empty, add an incoming edge from each added pred. Must 1020// still compute blocks with defs to replace for this block below. 1022for (
auto *Pred : AddedBlockSet) {
1023auto *LastDefForPred = LastDefAddedPred[Pred];
1024for (
intI = 0, E = EdgeCountMap[{Pred, BB}];
I < E; ++
I)
1028// Pick any existing predecessor and get its definition. All other 1029// existing predecessors should have the same one, since no phi existed. 1030auto *
P1 = *PrevBlockSet.begin();
1033// Check DefP1 against all Defs in LastDefPredPair. If all the same, 1035bool InsertPhi =
false;
1036for (
auto LastDefPredPair : LastDefAddedPred)
1037if (DefP1 != LastDefPredPair.second) {
1042// Since NewPhi may be used in other newly added Phis, replace all uses 1043// of NewPhi with the definition coming from all predecessors (DefP1), 1044// before deleting it. 1050// Update Phi with new values for new predecessors and old value for all 1051// other predecessors. Since AddedBlockSet and PrevBlockSet are ordered 1052// sets, the order of entries in NewPhi is deterministic. 1053for (
auto *Pred : AddedBlockSet) {
1054auto *LastDefForPred = LastDefAddedPred[Pred];
1055for (
intI = 0, E = EdgeCountMap[{Pred, BB}];
I < E; ++
I)
1058for (
auto *Pred : PrevBlockSet)
1059for (
intI = 0, E = EdgeCountMap[{Pred, BB}];
I < E; ++
I)
1063// Get all blocks that used to dominate BB and no longer do after adding 1064// AddedBlockSet, where PrevBlockSet are the previously known predecessors. 1066BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1067assert(PrevIDom &&
"Previous IDom should exists");
1069assert(NewIDom &&
"BB should have a new valid idom");
1071"New idom should dominate old idom");
1072 GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1075 tryRemoveTrivialPhis(InsertedPhis);
1076// Create the set of blocks that now have a definition. We'll use this to 1077// compute IDF and add Phis there next. 1079for (
auto &VH : InsertedPhis)
1080if (
auto *MPhi = cast_or_null<MemoryPhi>(VH))
1081 BlocksToProcess.
push_back(MPhi->getBlock());
1083// Compute IDF and add Phis in all IDF blocks that do not have one. 1085if (!BlocksToProcess.
empty()) {
1088 BlocksToProcess.
end());
1089 IDFs.setDefiningBlocks(DefiningBlocks);
1090 IDFs.calculate(IDFBlocks);
1093// First create all needed Phis. 1094for (
auto *BBIDF : IDFBlocks)
1096auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
1097 InsertedPhis.push_back(IDFPhi);
1098 PhisToFill.
insert(IDFPhi);
1100// Then update or insert their correct incoming values. 1101for (
auto *BBIDF : IDFBlocks) {
1103assert(IDFPhi &&
"Phi must exist");
1104if (!PhisToFill.
count(IDFPhi)) {
1105// Update existing Phi. 1106// FIXME: some updates may be redundant, try to optimize and skip some. 1107for (
unsignedI = 0, E = IDFPhi->getNumIncomingValues();
I < E; ++
I)
1108 IDFPhi->setIncomingValue(
I, GetLastDef(IDFPhi->getIncomingBlock(
I)));
1110for (
auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
1111 IDFPhi->addIncoming(GetLastDef(Pi), Pi);
1116// Now for all defs in BlocksWithDefsToReplace, if there are uses they no 1117// longer dominate, replace those with the closest dominating def. 1118// This will also update optimized accesses, as they're also uses. 1119for (
auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1121for (
auto &DefToReplaceUses : *DefsList) {
1122BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1125if (
MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
1126BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1127if (!DT.
dominates(DominatingBlock, DominatedBlock))
1128U.set(GetLastDef(DominatedBlock));
1131if (!DT.
dominates(DominatingBlock, DominatedBlock)) {
1136assert(IDom &&
"Block must have a valid IDom.");
1137U.set(GetLastDef(IDom->getBlock()));
1139 cast<MemoryUseOrDef>(Usr)->resetOptimized();
1146 tryRemoveTrivialPhis(InsertedPhis);
1149// Move What before Where in the MemorySSA IR. 1150template <
class WhereType>
1153// Mark MemoryPhi users of What not to be optimized. 1154for (
auto *U : What->
users())
1155if (
MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
1156 NonOptPhis.insert(PhiUser);
1158// Replace all our users with our defining access. 1161// Let MemorySSA take care of moving it around in the lists. 1162 MSSA->
moveTo(What, BB, Where);
1164// Now reinsert it into the IR and do whatever fixups needed. 1165if (
auto *MD = dyn_cast<MemoryDef>(What))
1168insertUse(cast<MemoryUse>(What),
/*RenameUses=*/true);
1170// Clear dangling pointers. We added all MemoryPhi users, but not all 1171// of them are removed by fixupDefs(). 1175// Move What before Where in the MemorySSA IR. 1180// Move What after Where in the MemorySSA IR. 1188return moveTo(What, BB, Where);
1196// All accesses in To used to be in From. Move to end and update access lists. 1204assert(Start->getParent() == To &&
"Incorrect Start instruction");
1210auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1212auto NextIt = ++MUD->getIterator();
1215 : cast<MemoryUseOrDef>(&*NextIt);
1217// Moving MUD from Accs in the moveTo above, may delete Accs, so we need 1218// to retrieve it again. 1224// If all accesses were moved and only a trivial Phi remains, we try to remove 1225// that Phi. This is needed when From is going to be deleted. 1227if (Defs && !Defs->
empty())
1228if (
auto *Phi = dyn_cast<MemoryPhi>(&*Defs->
begin()))
1229 tryRemoveTrivialPhi(Phi);
1236"To block is expected to be free of MemoryAccesses.");
1237 moveAllAccesses(
From, To, Start);
1240 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(
From), To);
1246"From block is expected to have a single predecessor (To).");
1247 moveAllAccesses(
From, To, Start);
1250 MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(
From), To);
1255bool IdenticalEdgesWereMerged) {
1257"Access list should be null for a new block.");
1263"Should have moved all predecessors.");
1266assert(!Preds.
empty() &&
"Must be moving at least one predecessor to the " 1267"new immediate predecessor.");
1268MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
1270// Currently only support the case of removing a single incoming edge when 1271// identical edges were not merged. 1272if (!IdenticalEdgesWereMerged)
1274"If identical edges were not merged, we cannot have duplicate " 1275"blocks in the predecessors");
1278 NewPhi->addIncoming(MA, B);
1279 if (!IdenticalEdgesWereMerged)
1285 Phi->addIncoming(NewPhi, New);
1286 tryRemoveTrivialPhi(NewPhi);
1292"Trying to remove the live on entry def");
1293// We can only delete phi nodes if they have no uses, or we can replace all 1294// uses with a single definition. 1296if (
MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
1297// Note that it is sufficient to know that all edges of the phi node have 1298// the same argument. If they do, by the definition of dominance frontiers 1299// (which we used to place this phi), that argument must dominate this phi, 1300// and thus, must dominate the phi's uses, and so we will not hit the assert 1304"We can't delete this memory phi");
1306 NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
1311// Re-point the uses at our defining access 1312if (!isa<MemoryUse>(MA) && !MA->
use_empty()) {
1313// Reset optimized on users of this store, and reset the uses. 1315// 1. This is a slightly modified version of RAUW to avoid walking the 1317// 2. If we wanted to be complete, we would have to reset the optimized 1318// flags on users of phi nodes if doing the below makes a phi node have all 1319// the same arguments. Instead, we prefer users to removeMemoryAccess those 1320// phi nodes, because doing it here would be N^3. 1323// Note: We assume MemorySSA is not used in metadata since it's not really 1326assert(NewDefTarget != MA &&
"Going into an infinite loop");
1329if (
auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
1330 MUD->resetOptimized();
1332if (
MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
1334 U.set(NewDefTarget);
1338// The call below to erase will destroy MA, so we can't change the order we 1339// are doing things here 1343// Optionally optimize Phi uses. This will recursively remove trivial phis. 1344if (!PhisToCheck.
empty()) {
1347 PhisToCheck.
clear();
1349unsigned PhisSize = PhisToOptimize.size();
1350while (PhisSize-- > 0)
1352 cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
1353 tryRemoveTrivialPhi(MP);
1359// First delete all uses of BB in MemoryPhis. 1362assert(TI &&
"Basic block expected to have a terminator instruction");
1364if (!DeadBlocks.
count(Succ))
1366 MP->unorderedDeleteIncomingBlock(BB);
1367 tryRemoveTrivialPhi(MP);
1369// Drop all references of all accesses in BB 1375// Next, delete all memory accesses in each block 1388for (
constauto &VH : UpdatedPHIs)
1389if (
auto *MPhi = cast_or_null<MemoryPhi>(VH))
1390 tryRemoveTrivialPhi(MPhi);
1395// Remove memory accesses in BB for I and all following instructions. 1396auto BBI =
I->getIterator(), BBE = BB->
end();
1397// FIXME: If this becomes too expensive, iterate until the first instruction 1398// with a memory access, then iterate over MemoryAccesses. 1401// Update phis in BB's successors to remove BB. 1406 MPhi->unorderedDeleteIncomingBlock(BB);
1410// Optimize trivial phis. 1411 tryRemoveTrivialPhis(UpdatedPHIs);
1418I, Definition,
/*Template=*/nullptr, CreationMustSucceed);
1427"New and old access must be in the same block");
1437"New and old access must be in the same block");
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DenseMap< Block *, BlockRelaxAux > Blocks
mir Rename Register Operands
static MemoryAccess * getNewDefiningAccessForClone(MemoryAccess *MA, const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap, MemorySSA *MSSA, function_ref< bool(BasicBlock *BB)> IsInClonedRegion)
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, MemoryAccess *NewDef)
static MemoryAccess * onlySingleValue(MemoryPhi *MP)
If all arguments of a MemoryPHI are defined by the same incoming argument, return that argument.
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/def graph.
Remove Loads Into Fake Uses
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI, AAResults &AA)
This file defines the SmallPtrSet class.
static const uint32_t IV[8]
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
reverse_iterator rbegin()
bool hasNPredecessors(unsigned N) const
Return true if this block has exactly N predecessors.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
This class represents an Operation in the Expression.
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
DomTreeNodeBase * getIDom() const
void applyUpdates(ArrayRef< UpdateType > Updates)
Inform the dominator tree about a sequence of CFG edge insertions and deletions and perform a batch u...
static constexpr UpdateKind Delete
static constexpr UpdateKind Insert
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const
Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
void calculate(SmallVectorImpl< NodeTy * > &IDFBlocks)
Calculate iterated dominance frontiers.
void setDefiningBlocks(const SmallPtrSetImpl< NodeTy * > &Blocks)
Give the IDF calculator the set of blocks in which the value is defined.
BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
AllAccessType::reverse_self_iterator getReverseIterator()
DefsOnlyType::self_iterator getDefsIterator()
DefsOnlyType::reverse_self_iterator getReverseDefsIterator()
BasicBlock * getBlock() const
AllAccessType::self_iterator getIterator()
Get the iterators for the all access list and the defs only list We default to the all access list.
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Represents phi nodes for memory accesses.
void setIncomingValue(unsigned I, MemoryAccess *V)
iterator_range< block_iterator > blocks()
void addIncoming(MemoryAccess *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
MemoryUseOrDef * createMemoryAccessBefore(Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt)
Create a MemoryAccess in MemorySSA before an existing MemoryAccess.
void insertDef(MemoryDef *Def, bool RenameUses=false)
Insert a definition into the MemorySSA IR.
void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where)
void removeEdge(BasicBlock *From, BasicBlock *To)
Update the MemoryPhi in To following an edge deletion between From and To.
void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks, ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VM, bool IgnoreIncomingWithNoClones=false)
Update MemorySSA after a loop was cloned, given the blocks in RPO order, the exit blocks and a 1:1 map of Old and New blocks.
void changeToUnreachable(const Instruction *I)
Instruction I will be changed to an unreachable.
void removeDuplicatePhiEdgesBetween(const BasicBlock *From, const BasicBlock *To)
Update the MemoryPhi in To to have a single incoming edge from From, following a CFG change that repl...
void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader, BasicBlock *LoopPreheader, BasicBlock *BackedgeBlock)
Update MemorySSA when inserting a unique backedge block for a loop.
void insertUse(MemoryUse *Use, bool RenameUses=false)
void removeBlocks(const SmallSetVector< BasicBlock *, 8 > &DeadBlocks)
Remove all MemoryAcceses in a set of BasicBlocks about to be deleted.
void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was spliced into From and To.
MemoryAccess * createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition, const BasicBlock *BB, MemorySSA::InsertionPlace Point, bool CreationMustSucceed=true)
Create a MemoryAccess in MemorySSA at a specified point in a block.
void removeMemoryAccess(MemoryAccess *, bool OptimizePhis=false)
Remove a MemoryAccess from MemorySSA, including updating all definitions and uses.
void applyInsertUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT)
Apply CFG insert updates, analogous with the DT edge updates.
MemoryUseOrDef * createMemoryAccessAfter(Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt)
Create a MemoryAccess in MemorySSA after an existing MemoryAccess.
void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM)
void applyUpdates(ArrayRef< CFGUpdate > Updates, DominatorTree &DT, bool UpdateDTFirst=false)
Apply CFG updates, analogous with the DT edge updates.
void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To, Instruction *Start)
From block was merged into To.
void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, MemorySSA::InsertionPlace Where)
void wireOldPredecessorsToNewImmediatePredecessor(BasicBlock *Old, BasicBlock *New, ArrayRef< BasicBlock * > Preds, bool IdenticalEdgesWereMerged=true)
A new empty BasicBlock (New) now branches directly to Old.
void updateExitBlocksForClonedLoop(ArrayRef< BasicBlock * > ExitBlocks, const ValueToValueMapTy &VMap, DominatorTree &DT)
Update phi nodes in exit block successors following cloning.
void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where)
Encapsulates MemorySSA, including all data associated with memory accesses.
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a block.
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
MemoryAccess * getLiveOnEntryDef() const
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Class that has the common methods + fields of memory uses/defs.
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)
Represents read-only accesses to memory.
iterator end()
Get an iterator to the end of the SetVector.
void clear()
Completely clear the SetVector.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool empty() const
Determine if the SetVector is empty or not.
iterator begin()
Get an iterator to the beginning of the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Value handle that tracks a Value across RAUW.
A Use represents the edge between a Value definition and its users.
void dropAllReferences()
Drop all references to operands.
unsigned getNumOperands() const
static void ValueIsRAUWd(Value *Old, Value *New)
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exists.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
bool hasValueHandle() const
Return true if there is a value handle associated with this value.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
A simple intrusive list implementation.
iterator insert(iterator I, reference Node)
Insert a node by reference; never copies.
bool empty() const
Check if the list is empty in constant time.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
auto pred_end(const MachineBasicBlock *BB)
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
auto pred_size(const MachineBasicBlock *BB)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
OutputIt copy(R &&Range, OutputIt Out)
auto pred_begin(const MachineBasicBlock *BB)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.