LLVM 20.0.0git
MemorySSA.cpp
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // namespace
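
// For illustration, the walker-annotated output produced above looks roughly
// like this (hypothetical IR; the access numbering is per-function):
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i8 0, ptr %a
//   ; MemoryUse(1) - clobbered by 1 = MemoryDef(liveOnEntry)
//   %v = load i8, ptr %a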

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memory location for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace
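
// For illustration only, a minimal sketch of MemoryLocOrCall as a map key
// (the value type here is made up; the real map lives in OptimizeUses below):
//
//   DenseMap<MemoryLocOrCall, unsigned> SeenCount;
//   MemoryLocOrCall Key(MU); // MU: a MemoryUse over a location or a call
//   ++SeenCount[Key];        // hashed via the DenseMapInfo specialization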

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
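
// For illustration (hypothetical IR): given
//
//   %a = load atomic i32, ptr %p acquire, align 4   ; MayClobber
//   %b = load i32, ptr %q, align 4                  ; Use
//
// areLoadsReorderable returns false, since %b must not be hoisted above the
// acquire load; two monotonic (or weaker) loads would instead return true.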

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
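
// For illustration (hypothetical IR): a load tagged !invariant.load can never
// be clobbered, so the check above lets its use go straight to liveOnEntry:
//
//   %v = load i32, ptr %p, align 4, !invariant.load !0
//   ...
//   !0 = !{}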

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns std::nullopt, NewPaused is a vector of searches that
  /// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
  /// state.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, std::nullopt);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }
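
  // Illustrative walkthrough of tryOptimizePhi on a hypothetical diamond,
  // where the two stores are known NoAlias:
  //
  //   entry:  1 = MemoryDef(liveOnEntry)           ; store to %a
  //   right:  2 = MemoryDef(1)                     ; store to %b (NoAlias %a)
  //   merge:  3 = MemoryPhi({left, 1}, {right, 2})
  //
  // For a query about %a at the phi, getWalkTarget yields 1. Both incoming
  // paths reach 1 without clobbering %a (2 writes only %b), so the walk
  // optimizes the query past the phi and returns 1 as the clobber.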

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool,
                                              bool UseInvariantGroup = true);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
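
// For illustration, a hypothetical caller of the walker above (only the
// MemorySSA APIs shown are real; the surrounding names are made up):
//
//   MemorySSA &MSSA = ...;
//   LoadInst *LI = ...; // some load in the function
//   MemoryAccess *MA = MSSA.getMemoryAccess(LI);
//   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
//   if (MSSA.isLiveOnEntryDef(Clobber))
//     ...; // nothing in this function clobbers LI's location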

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA, iterator_range(F->begin(), F->end()));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::MemorySSA(Loop &L, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(
      BatchAA, map_range(L.blocks(), [](const BasicBlock *BB) -> BasicBlock & {
        return *const_cast<BasicBlock *>(BB);
      }));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memory location is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        // The invariant.group handling in MemorySSA is ad-hoc and doesn't
        // support updates, so don't use it to optimize uses.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
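
// For illustration (hypothetical stack): while visiting a block whose version
// stack is
//
//   [liveOnEntry, 1 = MemoryDef(liveOnEntry), 2 = MemoryDef(1)]
//
// a MemoryUse can only resolve to one of those three entries; the per-location
// LowerBound/LastKill bookkeeping above merely remembers how much of the stack
// earlier queries for the same MemoryLocOrCall already examined.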
1504
1505void MemorySSA::placePHINodes(
1506constSmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1507// Determine where our MemoryPhi's should go
1508ForwardIDFCalculator IDFs(*DT);
1509 IDFs.setDefiningBlocks(DefiningBlocks);
1510SmallVector<BasicBlock *, 32> IDFBlocks;
1511 IDFs.calculate(IDFBlocks);
1512
1513// Now place MemoryPhi nodes.
1514for (auto &BB : IDFBlocks)
1515 createMemoryPhi(BB);
1516}
1517
1518template <typename IterT>
1519void MemorySSA::buildMemorySSA(BatchAAResults &BAA, IterTBlocks) {
1520// We create an access to represent "live on entry", for things like
1521// arguments or users of globals, where the memory they use is defined before
1522// the beginning of the function. We do not actually insert it into the IR.
1523// We do not define a live on exit for the immediate uses, and thus our
1524// semantics do *not* imply that something with no immediate uses can simply
1525// be removed.
1526BasicBlock &StartingPoint = *Blocks.begin();
1527 LiveOnEntryDef.reset(newMemoryDef(StartingPoint.getContext(),nullptr,
1528nullptr, &StartingPoint, NextID++));
1529
1530   // We maintain lists of memory accesses per block, trading memory for time.
1531   // The alternative would be to look up the memory access for every possible
1532   // instruction in the stream.
1533SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1534// Go through each block, figure out where defs occur, and chain together all
1535// the accesses.
1536for (BasicBlock &B :Blocks) {
1537bool InsertIntoDef =false;
1538AccessList *Accesses =nullptr;
1539DefsList *Defs =nullptr;
1540for (Instruction &I :B) {
1541MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1542if (!MUD)
1543continue;
1544
1545if (!Accesses)
1546 Accesses = getOrCreateAccessList(&B);
1547 Accesses->push_back(MUD);
1548if (isa<MemoryDef>(MUD)) {
1549 InsertIntoDef =true;
1550if (!Defs)
1551 Defs = getOrCreateDefsList(&B);
1552 Defs->push_back(*MUD);
1553 }
1554 }
1555if (InsertIntoDef)
1556 DefiningBlocks.insert(&B);
1557 }
1558 placePHINodes(DefiningBlocks);
1559
1560// Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1561// filled in with all blocks.
1562SmallPtrSet<BasicBlock *, 16> Visited;
1563if (L) {
1564     // Only building MemorySSA for a single loop. placePHINodes may have
1565     // inserted a MemoryPhi in the loop's preheader. As this is outside the
1566     // scope of the loop, replace its uses with LiveOnEntry and remove it.
1567if (auto *P =getMemoryAccess(L->getLoopPreheader())) {
1568for (Use &U :make_early_inc_range(P->uses()))
1569U.set(LiveOnEntryDef.get());
1570removeFromLists(P);
1571 }
1572// Now rename accesses in the loop. Populate Visited with the exit blocks of
1573// the loop, to limit the scope of the renaming.
1574SmallVector<BasicBlock *> ExitBlocks;
1575 L->getExitBlocks(ExitBlocks);
1576 Visited.insert(ExitBlocks.begin(), ExitBlocks.end());
1577renamePass(DT->getNode(L->getLoopPreheader()), LiveOnEntryDef.get(),
1578 Visited);
1579 }else {
1580renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1581 }
1582
1583// Mark the uses in unreachable blocks as live on entry, so that they go
1584// somewhere.
1585for (auto &BB :Blocks)
1586if (!Visited.count(&BB))
1587 markUnreachableAsLiveOnEntry(&BB);
1588}
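// Example (illustrative): after construction, printing the function shows the
// accesses threaded through the IR, e.g. for a store followed by a load:
//
//   define void @f(ptr %p) {
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, ptr %p
//     ; MemoryUse(1)
//     %v = load i32, ptr %p
//     ret void
//   }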
1589
1590MemorySSAWalker *MemorySSA::getWalker() {return getWalkerImpl(); }
1591
1592MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1593if (Walker)
1594return Walker.get();
1595
1596if (!WalkerBase)
1597 WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
1598
1599 Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
1600return Walker.get();
1601}
1602
1603MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1604if (SkipWalker)
1605return SkipWalker.get();
1606
1607if (!WalkerBase)
1608 WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
1609
1610 SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
1611return SkipWalker.get();
1612 }
1613
1615// This is a helper function used by the creation routines. It places NewAccess
1616// into the access and defs lists for a given basic block, at the given
1617// insertion point.
1618voidMemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1619constBasicBlock *BB,
1620InsertionPlace Point) {
1621auto *Accesses = getOrCreateAccessList(BB);
1622if (Point ==Beginning) {
1623// If it's a phi node, it goes first, otherwise, it goes after any phi
1624// nodes.
1625if (isa<MemoryPhi>(NewAccess)) {
1626 Accesses->push_front(NewAccess);
1627auto *Defs = getOrCreateDefsList(BB);
1628 Defs->push_front(*NewAccess);
1629 }else {
1630auto AI =find_if_not(
1631 *Accesses, [](constMemoryAccess &MA) {return isa<MemoryPhi>(MA); });
1632 Accesses->insert(AI, NewAccess);
1633if (!isa<MemoryUse>(NewAccess)) {
1634auto *Defs = getOrCreateDefsList(BB);
1635auto DI =find_if_not(
1636 *Defs, [](constMemoryAccess &MA) {return isa<MemoryPhi>(MA); });
1637 Defs->insert(DI, *NewAccess);
1638 }
1639 }
1640 }else {
1641 Accesses->push_back(NewAccess);
1642if (!isa<MemoryUse>(NewAccess)) {
1643auto *Defs = getOrCreateDefsList(BB);
1644 Defs->push_back(*NewAccess);
1645 }
1646 }
1647 BlockNumberingValid.erase(BB);
1648}
1649
1650voidMemorySSA::insertIntoListsBefore(MemoryAccess *What,constBasicBlock *BB,
1651AccessList::iterator InsertPt) {
1652auto *Accesses =getWritableBlockAccesses(BB);
1653bool WasEnd = InsertPt == Accesses->end();
1654 Accesses->insert(AccessList::iterator(InsertPt), What);
1655if (!isa<MemoryUse>(What)) {
1656auto *Defs = getOrCreateDefsList(BB);
1657// If we got asked to insert at the end, we have an easy job, just shove it
1658// at the end. If we got asked to insert before an existing def, we also get
1659// an iterator. If we got asked to insert before a use, we have to hunt for
1660// the next def.
1661if (WasEnd) {
1662 Defs->push_back(*What);
1663 }elseif (isa<MemoryDef>(InsertPt)) {
1664 Defs->insert(InsertPt->getDefsIterator(), *What);
1665 }else {
1666while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1667 ++InsertPt;
1668// Either we found a def, or we are inserting at the end
1669if (InsertPt == Accesses->end())
1670 Defs->push_back(*What);
1671else
1672 Defs->insert(InsertPt->getDefsIterator(), *What);
1673 }
1674 }
1675 BlockNumberingValid.erase(BB);
1676}
1677
1678void MemorySSA::prepareForMoveTo(MemoryAccess *What,BasicBlock *BB) {
1679// Keep it in the lookup tables, remove from the lists
1680removeFromLists(What,false);
1681
1682// Note that moving should implicitly invalidate the optimized state of a
1683// MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1684// MemoryDef.
1685if (auto *MD = dyn_cast<MemoryDef>(What))
1686 MD->resetOptimized();
1687 What->setBlock(BB);
1688}
1689
1690// Move What before Where in the IR. The end result is that What will belong to
1691// the right lists and have the right Block set, but will not otherwise be
1692// correct. It will not have the right defining access, and if it is a def,
1693// things below it will not properly be updated.
1694voidMemorySSA::moveTo(MemoryUseOrDef *What,BasicBlock *BB,
1695AccessList::iterator Where) {
1696 prepareForMoveTo(What, BB);
1697insertIntoListsBefore(What, BB, Where);
1698}
1699
1700voidMemorySSA::moveTo(MemoryAccess *What,BasicBlock *BB,
1701InsertionPlace Point) {
1702if (isa<MemoryPhi>(What)) {
1703assert(Point ==Beginning &&
1704"Can only move a Phi at the beginning of the block");
1705// Update lookup table entry
1706 ValueToMemoryAccess.erase(What->getBlock());
1707bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1708 (void)Inserted;
1709assert(Inserted &&"Cannot move a Phi to a block that already has one");
1710 }
1711
1712 prepareForMoveTo(What, BB);
1713insertIntoListsForBlock(What, BB, Point);
1714}
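// Usage note (illustrative sketch): passes normally do not call moveTo()
// directly; they go through MemorySSAUpdater (llvm/Analysis/MemorySSAUpdater.h),
// which also fixes up defining accesses. Assuming a MemorySSA &MSSA and two
// memory-touching instructions I and P (hypothetical names):
//
//   MemorySSAUpdater Updater(&MSSA);
//   I->moveBefore(P); // move the IR instruction first
//   Updater.moveBefore(cast<MemoryUseOrDef>(MSSA.getMemoryAccess(I)),
//                      cast<MemoryUseOrDef>(MSSA.getMemoryAccess(P)));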
1715
1716MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1717assert(!getMemoryAccess(BB) &&"MemoryPhi already exists for this BB");
1718MemoryPhi *Phi =newMemoryPhi(BB->getContext(), BB, NextID++);
1719   // Phis are always placed at the front of the block.
1720insertIntoListsForBlock(Phi, BB,Beginning);
1721 ValueToMemoryAccess[BB] = Phi;
1722return Phi;
1723}
1724
1725MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1726MemoryAccess *Definition,
1727constMemoryUseOrDef *Template,
1728bool CreationMustSucceed) {
1729assert(!isa<PHINode>(I) &&"Cannot create a defined access for a PHI");
1730MemoryUseOrDef *NewAccess = createNewAccess(I, AA,Template);
1731if (CreationMustSucceed)
1732assert(NewAccess !=nullptr &&"Tried to create a memory access for a "
1733"non-memory touching instruction");
1734if (NewAccess) {
1735assert((!Definition || !isa<MemoryUse>(Definition)) &&
1736"A use cannot be a defining access");
1737 NewAccess->setDefiningAccess(Definition);
1738 }
1739return NewAccess;
1740}
1741
1742 // Return true if the instruction has ordering constraints.
1743 // Note specifically that this only considers stores and loads
1744 // because others are still considered ModRef by getModRefInfo.
1745 static inline bool isOrdered(const Instruction *I) {
1746   if (auto *SI = dyn_cast<StoreInst>(I)) {
1747     if (!SI->isUnordered())
1748       return true;
1749   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1750     if (!LI->isUnordered())
1751       return true;
1752   }
1753   return false;
1754 }
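// Example: a volatile load is not "unordered", so isOrdered() returns true and
// createNewAccess below models it as a MemoryDef rather than a MemoryUse:
//
//   ; 2 = MemoryDef(1)
//   %v = load volatile i32, ptr %p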
1755
1756/// Helper function to create new memory accesses
1757template <typename AliasAnalysisType>
1758MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1759 AliasAnalysisType *AAP,
1760constMemoryUseOrDef *Template) {
1761// The assume intrinsic has a control dependency which we model by claiming
1762// that it writes arbitrarily. Debuginfo intrinsics may be considered
1763// clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1764// dependencies here.
1765// FIXME: Replace this special casing with a more accurate modelling of
1766// assume's control dependency.
1767if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1768switch (II->getIntrinsicID()) {
1769default:
1770break;
1771case Intrinsic::allow_runtime_check:
1772case Intrinsic::allow_ubsan_check:
1773case Intrinsic::assume:
1774case Intrinsic::experimental_noalias_scope_decl:
1775case Intrinsic::pseudoprobe:
1776returnnullptr;
1777 }
1778 }
1779
1780   // Using a nonstandard AA pipeline might leave us with unexpected modref
1781   // results for I, so add a check to not model instructions that may not read
1782   // from or write to memory. This is necessary for correctness.
1783if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1784returnnullptr;
1785
1786boolDef,Use;
1787if (Template) {
1788Def = isa<MemoryDef>(Template);
1789Use = isa<MemoryUse>(Template);
1790#if !defined(NDEBUG)
1791ModRefInfoModRef = AAP->getModRefInfo(I, std::nullopt);
1792bool DefCheck, UseCheck;
1793 DefCheck =isModSet(ModRef) ||isOrdered(I);
1794 UseCheck =isRefSet(ModRef);
1795     // Memory accesses should only be reduced and cannot be increased, since
1796     // AA might simply return better results as a result of some transformations.
1797     assert((Def == DefCheck || !DefCheck) &&
1798            "Memory accesses should only be reduced");
1799if (!Def &&Use != UseCheck) {
1800// New Access should not have more power than template access
1801assert(!UseCheck &&"Invalid template");
1802 }
1803#endif
1804 }else {
1805     // Find out what effect this instruction has on memory.
1806ModRefInfoModRef = AAP->getModRefInfo(I, std::nullopt);
1807// The isOrdered check is used to ensure that volatiles end up as defs
1808// (atomics end up as ModRef right now anyway). Until we separate the
1809// ordering chain from the memory chain, this enables people to see at least
1810// some relative ordering to volatiles. Note that getClobberingMemoryAccess
1811// will still give an answer that bypasses other volatile loads. TODO:
1812// Separate memory aliasing and ordering into two different chains so that
1813// we can precisely represent both "what memory will this read/write/is
1814// clobbered by" and "what instructions can I move this past".
1815Def =isModSet(ModRef) ||isOrdered(I);
1816Use =isRefSet(ModRef);
1817 }
1818
1819   // It's possible for an instruction to not modify memory at all. During
1820   // construction, we ignore such instructions.
1821if (!Def && !Use)
1822returnnullptr;
1823
1824MemoryUseOrDef *MUD;
1825if (Def) {
1826 MUD =newMemoryDef(I->getContext(),nullptr,I,I->getParent(), NextID++);
1827 }else {
1828 MUD =newMemoryUse(I->getContext(),nullptr,I,I->getParent());
1829if (isUseTriviallyOptimizableToLiveOnEntry(*AAP,I)) {
1830MemoryAccess *LiveOnEntry =getLiveOnEntryDef();
1831 MUD->setOptimized(LiveOnEntry);
1832 }
1833 }
1834 ValueToMemoryAccess[I] = MUD;
1835return MUD;
1836}
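// Example classification under a standard AA pipeline (illustrative):
//
//   store i32 0, ptr %p             -> MemoryDef (Mod)
//   %v = load i32, ptr %p           -> MemoryUse (Ref)
//   call void @llvm.memcpy.* (...)  -> MemoryDef (ModRef; the Def subsumes the read)
//   %a = add i32 %x, %y             -> no access (neither reads nor writes memory)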
1837
1838/// Properly remove \p MA from all of MemorySSA's lookup tables.
1839voidMemorySSA::removeFromLookups(MemoryAccess *MA) {
1840assert(MA->use_empty() &&
1841"Trying to remove memory access that still has uses");
1842 BlockNumbering.erase(MA);
1843if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1844 MUD->setDefiningAccess(nullptr);
1845// Invalidate our walker's cache if necessary
1846if (!isa<MemoryUse>(MA))
1847getWalker()->invalidateInfo(MA);
1848
1849Value *MemoryInst;
1850if (constauto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1851 MemoryInst = MUD->getMemoryInst();
1852else
1853 MemoryInst = MA->getBlock();
1854
1855auto VMA = ValueToMemoryAccess.find(MemoryInst);
1856if (VMA->second == MA)
1857 ValueToMemoryAccess.erase(VMA);
1858}
1859
1860/// Properly remove \p MA from all of MemorySSA's lists.
1861///
1862/// Because of the way the intrusive list and use lists work, it is important to
1863/// do removal in the right order.
1864/// ShouldDelete defaults to true, and will cause the memory access to also be
1865/// deleted, not just removed.
1866voidMemorySSA::removeFromLists(MemoryAccess *MA,bool ShouldDelete) {
1867BasicBlock *BB = MA->getBlock();
1868// The access list owns the reference, so we erase it from the non-owning list
1869// first.
1870if (!isa<MemoryUse>(MA)) {
1871auto DefsIt = PerBlockDefs.find(BB);
1872 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1873 Defs->remove(*MA);
1874if (Defs->empty())
1875 PerBlockDefs.erase(DefsIt);
1876 }
1877
1878// The erase call here will delete it. If we don't want it deleted, we call
1879// remove instead.
1880auto AccessIt = PerBlockAccesses.find(BB);
1881 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1882if (ShouldDelete)
1883 Accesses->erase(MA);
1884else
1885 Accesses->remove(MA);
1886
1887if (Accesses->empty()) {
1888 PerBlockAccesses.erase(AccessIt);
1889 BlockNumberingValid.erase(BB);
1890 }
1891}
1892
1893voidMemorySSA::print(raw_ostream &OS) const{
1894 MemorySSAAnnotatedWriter Writer(this);
1895Function *F = this->F;
1896if (L)
1897F = L->getHeader()->getParent();
1898F->print(OS, &Writer);
1899}
1900
1901#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1902LLVM_DUMP_METHODvoidMemorySSA::dump() const{print(dbgs()); }
1903#endif
1904
1905voidMemorySSA::verifyMemorySSA(VerificationLevel VL) const{
1906#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1907 VL =VerificationLevel::Full;
1908#endif
1909
1910#ifndef NDEBUG
1911if (F) {
1912autoBlocks =iterator_range(F->begin(), F->end());
1913verifyOrderingDominationAndDefUses(Blocks, VL);
1914verifyDominationNumbers(Blocks);
1915if (VL ==VerificationLevel::Full)
1916verifyPrevDefInPhis(Blocks);
1917 }else {
1918assert(L &&"must either have loop or function");
1919autoBlocks =
1920map_range(L->blocks(), [](constBasicBlock *BB) ->BasicBlock & {
1921 return *const_cast<BasicBlock *>(BB);
1922 });
1923verifyOrderingDominationAndDefUses(Blocks, VL);
1924verifyDominationNumbers(Blocks);
1925if (VL ==VerificationLevel::Full)
1926verifyPrevDefInPhis(Blocks);
1927 }
1928#endif
1929   // Previously, the verification used to also check that the clobbering access
1930   // cached by MemorySSA is the same as the clobbering access found by a later
1931   // query to AA. This does not hold true in general due to the current fragility
1932   // of BasicAA, which has arbitrary caps on the things it analyzes before giving
1933   // up. As a result, transformations that are correct will lead to BasicAA
1934   // returning different alias answers before and after that transformation.
1935   // Invalidating MemorySSA is not an option, because the results from BasicAA
1936   // can be so erratic that, in the worst case, we'd need to rebuild MemorySSA
1937   // from scratch after every transformation, which defeats the purpose of using
1938   // it. For such an example, see test4 added in D51960.
1939}
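// Usage note (illustrative): verification can be requested programmatically,
// or for every pass via the -verify-memoryssa flag handled elsewhere in this
// file:
//
//   if (VerifyMemorySSA)
//     MSSA.verifyMemorySSA(VerificationLevel::Full);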
1940
1941template <typename IterT>
1942voidMemorySSA::verifyPrevDefInPhis(IterTBlocks) const{
1943for (constBasicBlock &BB :Blocks) {
1944if (MemoryPhi *Phi =getMemoryAccess(&BB)) {
1945for (unsignedI = 0, E = Phi->getNumIncomingValues();I != E; ++I) {
1946auto *Pred = Phi->getIncomingBlock(I);
1947auto *IncAcc = Phi->getIncomingValue(I);
1948         // If Pred has no unreachable predecessors, get the last def by
1949         // walking IDoms. If, while walking IDoms, any of these has an
1950         // unreachable predecessor, then the incoming def can be any access.
1951if (auto *DTNode = DT->getNode(Pred)) {
1952while (DTNode) {
1953if (auto *DefList =getBlockDefs(DTNode->getBlock())) {
1954auto *LastAcc = &*(--DefList->end());
1955assert(LastAcc == IncAcc &&
1956"Incorrect incoming access into phi.");
1957 (void)IncAcc;
1958 (void)LastAcc;
1959break;
1960 }
1961 DTNode = DTNode->getIDom();
1962 }
1963 }else {
1964// If Pred has unreachable predecessors, but has at least a Def, the
1965// incoming access can be the last Def in Pred, or it could have been
1966// optimized to LoE. After an update, though, the LoE may have been
1967// replaced by another access, so IncAcc may be any access.
1968// If Pred has unreachable predecessors and no Defs, incoming access
1969// should be LoE; However, after an update, it may be any access.
1970 }
1971 }
1972 }
1973 }
1974}
1975
1976/// Verify that all of the blocks we believe to have valid domination numbers
1977/// actually have valid domination numbers.
1978template <typename IterT>
1979voidMemorySSA::verifyDominationNumbers(IterTBlocks) const{
1980if (BlockNumberingValid.empty())
1981return;
1982
1983SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1984for (constBasicBlock &BB :Blocks) {
1985if (!ValidBlocks.count(&BB))
1986continue;
1987
1988 ValidBlocks.erase(&BB);
1989
1990constAccessList *Accesses =getBlockAccesses(&BB);
1991// It's correct to say an empty block has valid numbering.
1992if (!Accesses)
1993continue;
1994
1995// Block numbering starts at 1.
1996unsignedlong LastNumber = 0;
1997for (constMemoryAccess &MA : *Accesses) {
1998auto ThisNumberIter = BlockNumbering.find(&MA);
1999assert(ThisNumberIter != BlockNumbering.end() &&
2000"MemoryAccess has no domination number in a valid block!");
2001
2002unsignedlong ThisNumber = ThisNumberIter->second;
2003assert(ThisNumber > LastNumber &&
2004"Domination numbers should be strictly increasing!");
2005 (void)LastNumber;
2006 LastNumber = ThisNumber;
2007 }
2008 }
2009
2010assert(ValidBlocks.empty() &&
2011"All valid BasicBlocks should exist in F -- dangling pointers?");
2012}
2013
2014 /// Verify ordering: the order and existence of MemoryAccesses matches the
2015 /// order and existence of memory-affecting instructions.
2016 /// Verify domination: each definition dominates all of its uses.
2017 /// Verify def-uses: the immediate use information - walk all the memory
2018 /// accesses and verify that, for each use, it appears in the appropriate
2019 /// def's use list.
2020template <typename IterT>
2021voidMemorySSA::verifyOrderingDominationAndDefUses(IterTBlocks,
2022VerificationLevel VL) const{
2023// Walk all the blocks, comparing what the lookups think and what the access
2024// lists think, as well as the order in the blocks vs the order in the access
2025// lists.
2026SmallVector<MemoryAccess *, 32> ActualAccesses;
2027SmallVector<MemoryAccess *, 32> ActualDefs;
2028for (BasicBlock &B :Blocks) {
2029constAccessList *AL =getBlockAccesses(&B);
2030constauto *DL =getBlockDefs(&B);
2031MemoryPhi *Phi =getMemoryAccess(&B);
2032if (Phi) {
2033// Verify ordering.
2034 ActualAccesses.push_back(Phi);
2035 ActualDefs.push_back(Phi);
2036// Verify domination
2037for (constUse &U : Phi->uses()) {
2038         assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2039 (void)U;
2040 }
2041// Verify def-uses for full verify.
2042if (VL ==VerificationLevel::Full) {
2043assert(Phi->getNumOperands() ==pred_size(&B) &&
2044"Incomplete MemoryPhi Node");
2045for (unsignedI = 0, E = Phi->getNumIncomingValues();I != E; ++I) {
2046 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2047assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2048"Incoming phi block not a block predecessor");
2049 }
2050 }
2051 }
2052
2053for (Instruction &I :B) {
2054MemoryUseOrDef *MA =getMemoryAccess(&I);
2055assert((!MA || (AL && (isa<MemoryUse>(MA) ||DL))) &&
2056"We have memory affecting instructions "
2057"in this block but they are not in the "
2058"access list or defs list");
2059if (MA) {
2060// Verify ordering.
2061 ActualAccesses.push_back(MA);
2062if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2063// Verify ordering.
2064 ActualDefs.push_back(MA);
2065// Verify domination.
2066for (constUse &U : MD->uses()) {
2067           assert(dominates(MD, U) &&
2068                  "Memory Def does not dominate its uses");
2069 (void)U;
2070 }
2071 }
2072// Verify def-uses for full verify.
2073if (VL ==VerificationLevel::Full)
2074 verifyUseInDefs(MA->getDefiningAccess(), MA);
2075 }
2076 }
2077// Either we hit the assert, really have no accesses, or we have both
2078// accesses and an access list. Same with defs.
2079if (!AL && !DL)
2080continue;
2081// Verify ordering.
2082assert(AL->size() == ActualAccesses.size() &&
2083"We don't have the same number of accesses in the block as on the "
2084"access list");
2085assert((DL || ActualDefs.size() == 0) &&
2086"Either we should have a defs list, or we should have no defs");
2087assert((!DL ||DL->size() == ActualDefs.size()) &&
2088"We don't have the same number of defs in the block as on the "
2089"def list");
2090auto ALI = AL->begin();
2091auto AAI = ActualAccesses.begin();
2092while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2093assert(&*ALI == *AAI &&"Not the same accesses in the same order");
2094 ++ALI;
2095 ++AAI;
2096 }
2097 ActualAccesses.clear();
2098if (DL) {
2099auto DLI =DL->begin();
2100auto ADI = ActualDefs.begin();
2101while (DLI !=DL->end() && ADI != ActualDefs.end()) {
2102assert(&*DLI == *ADI &&"Not the same defs in the same order");
2103 ++DLI;
2104 ++ADI;
2105 }
2106 }
2107 ActualDefs.clear();
2108 }
2109}
2110
2111/// Verify the def-use lists in MemorySSA, by verifying that \p Use
2112/// appears in the use list of \p Def.
2113void MemorySSA::verifyUseInDefs(MemoryAccess *Def,MemoryAccess *Use) const{
2114// The live on entry use may cause us to get a NULL def here
2115   if (!Def)
2116     assert(isLiveOnEntryDef(Use) &&
2117            "Null def but use does not point to live on entry def");
2118else
2119assert(is_contained(Def->users(),Use) &&
2120"Did not find use in def's use list");
2121}
2122
2123/// Perform a local numbering on blocks so that instruction ordering can be
2124/// determined in constant time.
2125/// TODO: We currently just number in order. If we numbered by N, we could
2126/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2127/// log2(N) sequences of mixed before and after) without needing to invalidate
2128/// the numbering.
2129void MemorySSA::renumberBlock(constBasicBlock *B) const{
2130// The pre-increment ensures the numbers really start at 1.
2131unsignedlong CurrentNumber = 0;
2132constAccessList *AL =getBlockAccesses(B);
2133assert(AL !=nullptr &&"Asking to renumber an empty block");
2134for (constauto &I : *AL)
2135 BlockNumbering[&I] = ++CurrentNumber;
2136 BlockNumberingValid.insert(B);
2137}
2138
2139/// Determine, for two memory accesses in the same block,
2140/// whether \p Dominator dominates \p Dominatee.
2141/// \returns True if \p Dominator dominates \p Dominatee.
2142boolMemorySSA::locallyDominates(constMemoryAccess *Dominator,
2143constMemoryAccess *Dominatee) const{
2144constBasicBlock *DominatorBlock = Dominator->getBlock();
2145
2146assert((DominatorBlock == Dominatee->getBlock()) &&
2147"Asking for local domination when accesses are in different blocks!");
2148// A node dominates itself.
2149if (Dominatee == Dominator)
2150returntrue;
2151
2152// When Dominatee is defined on function entry, it is not dominated by another
2153// memory access.
2154if (isLiveOnEntryDef(Dominatee))
2155returnfalse;
2156
2157// When Dominator is defined on function entry, it dominates the other memory
2158// access.
2159if (isLiveOnEntryDef(Dominator))
2160returntrue;
2161
2162if (!BlockNumberingValid.count(DominatorBlock))
2163 renumberBlock(DominatorBlock);
2164
2165unsignedlong DominatorNum = BlockNumbering.lookup(Dominator);
2166// All numbers start with 1
2167assert(DominatorNum != 0 &&"Block was not numbered properly");
2168unsignedlong DominateeNum = BlockNumbering.lookup(Dominatee);
2169assert(DominateeNum != 0 &&"Block was not numbered properly");
2170return DominatorNum < DominateeNum;
2171}
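// Example (illustrative): within a single block the lazy numbering makes
// ordering queries O(1). Given:
//
//   ; 1 = MemoryDef(liveOnEntry)   <- block number 1
//   ; 2 = MemoryDef(1)             <- block number 2
//
// locallyDominates(#1, #2) returns true because 1 < 2.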
2172
2173boolMemorySSA::dominates(constMemoryAccess *Dominator,
2174constMemoryAccess *Dominatee) const{
2175if (Dominator == Dominatee)
2176returntrue;
2177
2178if (isLiveOnEntryDef(Dominatee))
2179returnfalse;
2180
2181if (Dominator->getBlock() != Dominatee->getBlock())
2182return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2183returnlocallyDominates(Dominator, Dominatee);
2184}
2185
2186boolMemorySSA::dominates(constMemoryAccess *Dominator,
2187constUse &Dominatee) const{
2188if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2189BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2190// The def must dominate the incoming block of the phi.
2191if (UseBB != Dominator->getBlock())
2192return DT->dominates(Dominator->getBlock(), UseBB);
2193// If the UseBB and the DefBB are the same, compare locally.
2194returnlocallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2195 }
2196// If it's not a PHI node use, the normal dominates can already handle it.
2197returndominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2198}
2199
2200voidMemorySSA::ensureOptimizedUses() {
2201if (IsOptimized)
2202return;
2203
2204BatchAAResults BatchAA(*AA);
2205ClobberWalkerBase WalkerBase(this, DT);
2206CachingWalker WalkerLocal(this, &WalkerBase);
2207OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
2208 IsOptimized =true;
2209}
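// Usage note (illustrative): a pass that is about to query many MemoryUses
// (e.g. EarlyCSE does this) can batch-optimize them once up front:
//
//   MSSA.ensureOptimizedUses();
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   // Walker queries now start from already-optimized defining accesses.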
2210
2211 void MemoryAccess::print(raw_ostream &OS) const {
2212   switch (getValueID()) {
2213   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2214   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2215   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2216   }
2217   llvm_unreachable("invalid value id");
2218 }
2219
2220voidMemoryDef::print(raw_ostream &OS) const{
2221MemoryAccess *UO = getDefiningAccess();
2222
2223auto printID = [&OS](MemoryAccess *A) {
2224if (A &&A->getID())
2225OS <<A->getID();
2226else
2227OS <<LiveOnEntryStr;
2228 };
2229
2230OS << getID() <<" = MemoryDef(";
2231 printID(UO);
2232OS <<")";
2233
2234if (isOptimized()) {
2235OS <<"->";
2236 printID(getOptimized());
2237 }
2238}
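// Example printed forms (illustrative):
//
//   1 = MemoryDef(liveOnEntry)        ; unoptimized def
//   2 = MemoryDef(1)->liveOnEntry     ; def whose clobber has been optimized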
2239
2240voidMemoryPhi::print(raw_ostream &OS) const{
2241 ListSeparator LS(",");
2242OS << getID() <<" = MemoryPhi(";
2243for (constauto &Op : operands()) {
2244BasicBlock *BB = getIncomingBlock(Op);
2245MemoryAccess *MA = cast<MemoryAccess>(Op);
2246
2247OS << LS <<'{';
2248if (BB->hasName())
2249OS << BB->getName();
2250else
2251 BB->printAsOperand(OS,false);
2252OS <<',';
2253if (unsignedID = MA->getID())
2254OS <<ID;
2255else
2256OS <<LiveOnEntryStr;
2257OS <<'}';
2258 }
2259OS <<')';
2260}
2261
2262voidMemoryUse::print(raw_ostream &OS) const{
2263MemoryAccess *UO = getDefiningAccess();
2264OS <<"MemoryUse(";
2265if (UO && UO->getID())
2266OS << UO->getID();
2267else
2268OS <<LiveOnEntryStr;
2269OS <<')';
2270}
2271
2272 void MemoryAccess::dump() const {
2273 // Cannot completely remove virtual function even in release mode.
2274 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2275   print(dbgs());
2276   dbgs() << "\n";
2277 #endif
2278 }
2279
2280classDOTFuncMSSAInfo {
2281private:
2282constFunction &F;
2283 MemorySSAAnnotatedWriter MSSAWriter;
2284
2285public:
2286DOTFuncMSSAInfo(constFunction &F,MemorySSA &MSSA)
2287 :F(F), MSSAWriter(&MSSA) {}
2288
2289constFunction *getFunction() {return &F; }
2290 MemorySSAAnnotatedWriter &getWriter() {return MSSAWriter; }
2291};
2292
2293namespacellvm {
2294
2295template <>
2296structGraphTraits<DOTFuncMSSAInfo *> :publicGraphTraits<const BasicBlock *> {
2297staticNodeRefgetEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2298return &(CFGInfo->getFunction()->getEntryBlock());
2299 }
2300
2301// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2302usingnodes_iterator =pointer_iterator<Function::const_iterator>;
2303
2304staticnodes_iteratornodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2305returnnodes_iterator(CFGInfo->getFunction()->begin());
2306 }
2307
2308staticnodes_iteratornodes_end(DOTFuncMSSAInfo *CFGInfo) {
2309returnnodes_iterator(CFGInfo->getFunction()->end());
2310 }
2311
2312staticsize_tsize(DOTFuncMSSAInfo *CFGInfo) {
2313return CFGInfo->getFunction()->size();
2314 }
2315};
2316
2317template <>
2318structDOTGraphTraits<DOTFuncMSSAInfo *> :publicDefaultDOTGraphTraits {
2319
2320DOTGraphTraits(bool IsSimple =false) :DefaultDOTGraphTraits(IsSimple) {}
2321
2322static std::stringgetGraphName(DOTFuncMSSAInfo *CFGInfo) {
2323return"MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2324"' function";
2325 }
2326
2327 std::stringgetNodeLabel(constBasicBlock *Node,DOTFuncMSSAInfo *CFGInfo) {
2328returnDOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2329 Node,nullptr,
2330 [CFGInfo](raw_string_ostream &OS,constBasicBlock &BB) ->void {
2331 BB.print(OS, &CFGInfo->getWriter(),true,true);
2332 },
2333 [](std::string &S,unsigned &I,unsignedIdx) ->void {
2334 std::string Str = S.substr(I,Idx -I);
2335StringRef SR = Str;
2336if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2337 SR.count("MemoryUse("))
2338return;
2339DOTGraphTraits<DOTFuncInfo *>::eraseComment(S,I,Idx);
2340 });
2341 }
2342
2343static std::stringgetEdgeSourceLabel(constBasicBlock *Node,
2344const_succ_iteratorI) {
2345returnDOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node,I);
2346 }
2347
2348 /// Display the raw branch weights from PGO.
2349 std::stringgetEdgeAttributes(constBasicBlock *Node,const_succ_iteratorI,
2350DOTFuncMSSAInfo *CFGInfo) {
2351return"";
2352 }
2353
2354 std::stringgetNodeAttributes(constBasicBlock *Node,
2355DOTFuncMSSAInfo *CFGInfo) {
2356returngetNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2357 ?"style=filled, fillcolor=lightpink"
2358 :"";
2359 }
2360};
2361
2362}// namespace llvm
2363
2364AnalysisKey MemorySSAAnalysis::Key;
2365
2366MemorySSAAnalysis::ResultMemorySSAAnalysis::run(Function &F,
2367FunctionAnalysisManager &AM) {
2368auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2369auto &AA = AM.getResult<AAManager>(F);
2370returnMemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2371}
2372
2373boolMemorySSAAnalysis::Result::invalidate(
2374Function &F,constPreservedAnalyses &PA,
2375FunctionAnalysisManager::Invalidator &Inv) {
2376auto PAC = PA.getChecker<MemorySSAAnalysis>();
2377return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2378 Inv.invalidate<AAManager>(F, PA) ||
2379 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2380}
2381
2382PreservedAnalysesMemorySSAPrinterPass::run(Function &F,
2383FunctionAnalysisManager &AM) {
2384auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2385if (EnsureOptimizedUses)
2386 MSSA.ensureOptimizedUses();
2387if (DotCFGMSSA !="") {
2388DOTFuncMSSAInfo CFGInfo(F, MSSA);
2389WriteGraph(&CFGInfo,"",false,"MSSA",DotCFGMSSA);
2390 }else {
2391OS <<"MemorySSA for function: " <<F.getName() <<"\n";
2392 MSSA.print(OS);
2393 }
2394
2395returnPreservedAnalyses::all();
2396}
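// Usage note (illustrative): the printer pass is reachable from the opt tool,
// either as plain text or as a dot graph via the -dot-cfg-mssa option defined
// at the top of this file:
//
//   opt -passes='print<memoryssa>' -disable-output test.ll
//   opt -passes='print<memoryssa>' -dot-cfg-mssa=mssa.dot -disable-output test.ll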
2397
2398PreservedAnalysesMemorySSAWalkerPrinterPass::run(Function &F,
2399FunctionAnalysisManager &AM) {
2400auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2401OS <<"MemorySSA (walker) for function: " <<F.getName() <<"\n";
2402 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2403F.print(OS, &Writer);
2404
2405returnPreservedAnalyses::all();
2406}
2407
2408PreservedAnalysesMemorySSAVerifierPass::run(Function &F,
2409FunctionAnalysisManager &AM) {
2410 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2411
2412returnPreservedAnalyses::all();
2413}
2414
2415charMemorySSAWrapperPass::ID = 0;
2416
2417MemorySSAWrapperPass::MemorySSAWrapperPass() :FunctionPass(ID) {
2418initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2419}
2420
2421voidMemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2422
2423voidMemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const{
2424 AU.setPreservesAll();
2425 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2426 AU.addRequiredTransitive<AAResultsWrapperPass>();
2427}
2428
2429boolMemorySSAWrapperPass::runOnFunction(Function &F) {
2430auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2431auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2432 MSSA.reset(newMemorySSA(F, &AA, &DT));
2433returnfalse;
2434}
2435
2436voidMemorySSAWrapperPass::verifyAnalysis() const{
2437if (VerifyMemorySSA)
2438 MSSA->verifyMemorySSA();
2439}
2440
2441voidMemorySSAWrapperPass::print(raw_ostream &OS,constModule *M) const{
2442 MSSA->print(OS);
2443}
2444
2445MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2446
2447/// Walk the use-def chains starting at \p StartingAccess and find
2448/// the MemoryAccess that actually clobbers Loc.
2449///
2450/// \returns our clobbering memory access
2451MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2452MemoryAccess *StartingAccess,constMemoryLocation &Loc,
2453BatchAAResults &BAA,unsigned &UpwardWalkLimit) {
2454assert(!isa<MemoryUse>(StartingAccess) &&"Use cannot be defining access");
2455
2456// If location is undefined, conservatively return starting access.
2457if (Loc.Ptr ==nullptr)
2458return StartingAccess;
2459
2460Instruction *I =nullptr;
2461if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2462if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2463return StartingUseOrDef;
2464
2465I = StartingUseOrDef->getMemoryInst();
2466
2467// Conservatively, fences are always clobbers, so don't perform the walk if
2468// we hit a fence.
2469if (!isa<CallBase>(I) &&I->isFenceLike())
2470return StartingUseOrDef;
2471 }
2472
2473 UpwardsMemoryQuery Q;
2474 Q.OriginalAccess = StartingAccess;
2475 Q.StartingLoc = Loc;
2476 Q.Inst =nullptr;
2477 Q.IsCall =false;
2478
2479// Unlike the other function, do not walk to the def of a def, because we are
2480// handed something we already believe is the clobbering access.
2481// We never set SkipSelf to true in Q in this method.
2482MemoryAccess *Clobber =
2483 Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
2484LLVM_DEBUG({
2485dbgs() <<"Clobber starting at access " << *StartingAccess <<"\n";
2486if (I)
2487dbgs() <<" for instruction " << *I <<"\n";
2488dbgs() <<" is " << *Clobber <<"\n";
2489 });
2490return Clobber;
2491}
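// Example (illustrative sketch): querying the clobber of an explicit location
// from a given starting access, as callers of the Loc-based walker API do.
// `MSSA`, `BAA`, `SomeDef`, and `LI` are hypothetical names:
//
//   MemoryLocation Loc = MemoryLocation::get(LI);
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(SomeDef, Loc, BAA);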
2492
2493staticconstInstruction *
2494getInvariantGroupClobberingInstruction(Instruction &I,DominatorTree &DT) {
2495if (!I.hasMetadata(LLVMContext::MD_invariant_group) ||I.isVolatile())
2496returnnullptr;
2497
2498// We consider bitcasts and zero GEPs to be the same pointer value. Start by
2499// stripping bitcasts and zero GEPs, then we will recursively look at loads
2500// and stores through bitcasts and zero GEPs.
2501Value *PointerOperand =getLoadStorePointerOperand(&I)->stripPointerCasts();
2502
2503// It's not safe to walk the use list of a global value because function
2504// passes aren't allowed to look outside their functions.
2505// FIXME: this could be fixed by filtering instructions from outside of
2506// current function.
2507if (isa<Constant>(PointerOperand))
2508returnnullptr;
2509
2510constInstruction *MostDominatingInstruction = &I;
2511
2512for (constUser *Us : PointerOperand->users()) {
2513auto *U = dyn_cast<Instruction>(Us);
2514if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
2515continue;
2516
2517     // If we hit a load/store with invariant.group metadata and the same
2518     // pointer operand, we can assume that the value pointed to by the
2519     // pointer operand didn't change.
2520if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
2521getLoadStorePointerOperand(U) == PointerOperand && !U->isVolatile()) {
2522 MostDominatingInstruction = U;
2523 }
2524 }
2525
2526return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2527}
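// Example (illustrative): given two accesses of the same pointer carrying
// !invariant.group metadata, the earlier dominating one answers the query:
//
//   store i32 42, ptr %p, !invariant.group !0
//   ...
//   %v = load i32, ptr %p, !invariant.group !0  ; clobber resolves to the store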
2528
2529MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2530MemoryAccess *MA,BatchAAResults &BAA,unsigned &UpwardWalkLimit,
2531bool SkipSelf,bool UseInvariantGroup) {
2532auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2533// If this is a MemoryPhi, we can't do anything.
2534if (!StartingAccess)
2535return MA;
2536
2537if (UseInvariantGroup) {
2538if (auto *I =getInvariantGroupClobberingInstruction(
2539 *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
2540assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2541
2542auto *ClobberMA = MSSA->getMemoryAccess(I);
2543assert(ClobberMA);
2544if (isa<MemoryUse>(ClobberMA))
2545return ClobberMA->getDefiningAccess();
2546return ClobberMA;
2547 }
2548 }
2549
2550bool IsOptimized =false;
2551
2552// If this is an already optimized use or def, return the optimized result.
2553// Note: Currently, we store the optimized def result in a separate field,
2554// since we can't use the defining access.
2555if (StartingAccess->isOptimized()) {
2556if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2557return StartingAccess->getOptimized();
2558 IsOptimized =true;
2559 }
2560
2561constInstruction *I = StartingAccess->getMemoryInst();
2562   // We can't sanely do anything with a fence, since fences conservatively
2563   // clobber all memory and have no locations to get pointers from to try to
2564   // disambiguate.
2565if (!isa<CallBase>(I) &&I->isFenceLike())
2566return StartingAccess;
2567
2568 UpwardsMemoryQuery Q(I, StartingAccess);
2569
2570if (isUseTriviallyOptimizableToLiveOnEntry(BAA,I)) {
2571MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2572 StartingAccess->setOptimized(LiveOnEntry);
2573return LiveOnEntry;
2574 }
2575
2576MemoryAccess *OptimizedAccess;
2577if (!IsOptimized) {
2578// Start with the thing we already think clobbers this location
2579MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2580
2581// At this point, DefiningAccess may be the live on entry def.
2582// If it is, we will not get a better result.
2583if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2584 StartingAccess->setOptimized(DefiningAccess);
2585return DefiningAccess;
2586 }
2587
2588 OptimizedAccess =
2589 Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
2590 StartingAccess->setOptimized(OptimizedAccess);
2591 }else
2592 OptimizedAccess = StartingAccess->getOptimized();
2593
2594LLVM_DEBUG(dbgs() <<"Starting Memory SSA clobber for " << *I <<" is ");
2595LLVM_DEBUG(dbgs() << *StartingAccess <<"\n");
2596LLVM_DEBUG(dbgs() <<"Optimized Memory SSA clobber for " << *I <<" is ");
2597LLVM_DEBUG(dbgs() << *OptimizedAccess <<"\n");
2598
2599MemoryAccess *Result;
2600if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2601 isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2602assert(isa<MemoryDef>(Q.OriginalAccess));
2603 Q.SkipSelfAccess =true;
2604 Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
2605 }else
2606 Result = OptimizedAccess;
2607
2608LLVM_DEBUG(dbgs() <<"Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2609LLVM_DEBUG(dbgs() <<"] for " << *I <<" is " << *Result <<"\n");
2610
2611return Result;
2612}
2613
2614 MemoryAccess *
2615 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2616                                                     BatchAAResults &) {
2617   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2618     return Use->getDefiningAccess();
2619   return MA;
2620 }
2621
2622 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2623     MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
2624   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2625     return Use->getDefiningAccess();
2626   return StartingAccess;
2627 }
2628
2629 void MemoryPhi::deleteMe(DerivedUser *Self) {
2630   delete static_cast<MemoryPhi *>(Self);
2631 }
2632
2633 void MemoryDef::deleteMe(DerivedUser *Self) {
2634   delete static_cast<MemoryDef *>(Self);
2635 }
2636
2637 void MemoryUse::deleteMe(DerivedUser *Self) {
2638   delete static_cast<MemoryUse *>(Self);
2639 }
2640
2641 bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
2642   auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
2643     Ptr = Ptr->stripPointerCasts();
2644     if (!isa<Instruction>(Ptr))
2645       return true;
2646     return isa<AllocaInst>(Ptr);
2647   };
2648
2649   Ptr = Ptr->stripPointerCasts();
2650   if (auto *I = dyn_cast<Instruction>(Ptr)) {
2651     if (I->getParent()->isEntryBlock())
2652       return true;
2653   }
2654   if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2655     return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2656            GEP->hasAllConstantIndices();
2657   }
2658   return IsGuaranteedLoopInvariantBase(Ptr);
2659 }
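// Examples (illustrative) of what IsGuaranteedLoopInvariant accepts: any
// non-instruction pointer (argument, global), an alloca, an instruction in the
// entry block, or a constant-index GEP of an invariant base:
//
//   %a = alloca i32                         ; invariant (alloca)
//   %g = getelementptr i32, ptr %a, i64 1   ; invariant (const GEP of alloca)
//   %p = getelementptr i32, ptr %q, i64 %i  ; not invariant (variable index)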
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition:ARMSLSHardening.cpp:73
AliasAnalysis.h
AssemblyAnnotationWriter.h
AtomicOrdering.h
Atomic ordering constants.
true
basic Basic Alias true
Definition:BasicAliasAnalysis.cpp:1981
From
BlockVerifier::State From
Definition:BlockVerifier.cpp:57
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
D
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
E
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
CFGPrinter.h
Casting.h
CommandLine.h
Compiler.h
LLVM_DUMP_METHOD
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition:Compiler.h:622
LLVM_ATTRIBUTE_UNUSED
#define LLVM_ATTRIBUTE_UNUSED
Definition:Compiler.h:282
Access
DXIL Resource Access
Definition:DXILResourceAccess.cpp:296
Idx
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Definition:DeadArgumentElimination.cpp:353
Debug.h
LLVM_DEBUG
#define LLVM_DEBUG(...)
Definition:Debug.h:106
DenseMapInfo.h
This file defines DenseMapInfo traits for DenseMap.
DenseMap.h
This file defines the DenseMap class.
DenseSet.h
This file defines the DenseSet and SmallDenseSet classes.
DepthFirstIterator.h
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Dominators.h
Other
std::optional< std::vector< StOtherPiece > > Other
Definition:ELFYAML.cpp:1315
Blocks
DenseMap< Block *, BlockRelaxAux > Blocks
Definition:ELF_riscv.cpp:507
memssa
early cse memssa
Definition:EarlyCSE.cpp:1959
FormattedStream.h
GraphWriter.h
Hashing.h
GEP
Hexagon Common GEP
Definition:HexagonCommonGEP.cpp:170
stores
hexagon widen stores
Definition:HexagonLoadStoreWidening.cpp:200
BasicBlock.h
Function.h
Instruction.h
IntrinsicInst.h
Operator.h
PassManager.h
This header defines various interfaces for pass management in LLVM.
Use.h
This defines the Use class.
InitializePasses.h
Instructions.h
TemplateParamKind::Template
@ Template
IteratedDominanceFrontier.h
LLVMContext.h
LoopInfo.h
F
#define F(x, y, z)
Definition:MD5.cpp:55
I
#define I(x, y, z)
Definition:MD5.cpp:58
MemoryLocation.h
This file provides utility analysis objects describing memory locations.
instructionClobbersQuery
static bool instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc, const Instruction *UseInst, AliasAnalysisType &AA)
Definition:MemorySSA.cpp:282
MaxCheckLimit
Memory static true cl::opt< unsigned > MaxCheckLimit("memssa-check-limit", cl::Hidden, cl::init(100), cl::desc("The maximum number of stores/phis MemorySSA" "will consider trying to walk past (default = 100)"))
SSA
Memory SSA
Definition:MemorySSA.cpp:72
memoryssa
memoryssa
Definition:MemorySSA.cpp:72
VerifyMemorySSAX
static cl::opt< bool, true > VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), cl::Hidden, cl::desc("Enable verification of MemorySSA."))
LiveOnEntryStr
static const char LiveOnEntryStr[]
Definition:MemorySSA.cpp:91
checkClobberSanity
static LLVM_ATTRIBUTE_UNUSED void checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, const MemoryLocation &StartLoc, const MemorySSA &MSSA, const UpwardsMemoryQuery &Query, BatchAAResults &AA, bool AllowImpreciseClobber=false)
Verifies that Start is clobbered by ClobberAt, and that nothing inbetween Start and ClobberAt can clo...
Definition:MemorySSA.cpp:397
isUseTriviallyOptimizableToLiveOnEntry
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA, const Instruction *I)
Definition:MemorySSA.cpp:371
areLoadsReorderable
static bool areLoadsReorderable(const LoadInst *Use, const LoadInst *MayClobber)
This does one-way checks to see if Use could theoretically be hoisted above MayClobber.
Definition:MemorySSA.cpp:256
getInvariantGroupClobberingInstruction
static const Instruction * getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT)
Definition:MemorySSA.cpp:2494
DotCFGMSSA
static cl::opt< std::string > DotCFGMSSA("dot-cfg-mssa", cl::value_desc("file name for generated dot file"), cl::desc("file name for generated dot file"), cl::init(""))
isOrdered
static bool isOrdered(const Instruction *I)
Definition:MemorySSA.cpp:1745
MemorySSA.h
This file exposes an interface to building/using memory SSA to walk memory instructions using a use/d...
getNodeLabel
static std::string getNodeLabel(const ValueInfo &VI, GlobalValueSummary *GVS)
Definition:ModuleSummaryIndex.cpp:511
II
uint64_t IntrinsicInst * II
Definition:NVVMIntrRange.cpp:51
P
#define P(N)
INITIALIZE_PASS_DEPENDENCY
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition:PassSupport.h:55
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition:PassSupport.h:57
INITIALIZE_PASS_BEGIN
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition:PassSupport.h:52
Pass.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
STLExtras.h
This file contains some templates that are useful if you are working with the STL at all.
OS
raw_pwrite_stream & OS
Definition:SampleProfWriter.cpp:51
SmallPtrSet.h
This file defines the SmallPtrSet class.
SmallVector.h
This file defines the SmallVector class.
StringExtras.h
This file contains some functions that are useful when dealing with strings.
Ptr
@ Ptr
Definition:TargetLibraryInfo.cpp:77
RHS
Value * RHS
Definition:X86PartialReduction.cpp:74
LHS
Value * LHS
Definition:X86PartialReduction.cpp:73
DOTFuncMSSAInfo
Definition:MemorySSA.cpp:2280
DOTFuncMSSAInfo::DOTFuncMSSAInfo
DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
Definition:MemorySSA.cpp:2286
DOTFuncMSSAInfo::getFunction
const Function * getFunction()
Definition:MemorySSA.cpp:2289
DOTFuncMSSAInfo::getWriter
MemorySSAAnnotatedWriter & getWriter()
Definition:MemorySSA.cpp:2290
Node
Definition:ItaniumDemangle.h:163
T
llvm::AAManager
A manager for alias analyses.
Definition:AliasAnalysis.h:933
llvm::AAResultsWrapperPass
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Definition:AliasAnalysis.h:981
llvm::AAResults
Definition:AliasAnalysis.h:314
llvm::AllAnalysesOn
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition:Analysis.h:49
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition:PassManager.h:292
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition:PassManager.h:310
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition:PassManager.h:253
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition:PassManager.h:410
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition:PassAnalysisSupport.h:47
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition:PassAnalysisSupport.h:130
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition:PassAnalysisSupport.h:81
llvm::AssemblyAnnotationWriter
Definition:AssemblyAnnotationWriter.h:27
llvm::AssemblyAnnotationWriter::emitBasicBlockStartAnnot
virtual void emitBasicBlockStartAnnot(const BasicBlock *, formatted_raw_ostream &)
emitBasicBlockStartAnnot - This may be implemented to emit a string right after the basic block label...
Definition:AssemblyAnnotationWriter.h:39
llvm::AssemblyAnnotationWriter::emitInstructionAnnot
virtual void emitInstructionAnnot(const Instruction *, formatted_raw_ostream &)
emitInstructionAnnot - This may be implemented to emit a string right before an instruction is emitte...
Definition:AssemblyAnnotationWriter.h:51
llvm::BasicBlock
LLVM Basic Block Representation.
Definition:BasicBlock.h:61
llvm::BasicBlock::print
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW=nullptr, bool ShouldPreserveUseListOrder=false, bool IsForDebug=false) const
Print the basic block to an output stream with an optional AssemblyAnnotationWriter.
Definition:AsmWriter.cpp:4901
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition:BasicBlock.h:220
llvm::BasicBlock::getContext
LLVMContext & getContext() const
Get the context in which this basic block lives.
Definition:BasicBlock.cpp:168
llvm::BatchAAResults
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
Definition:AliasAnalysis.h:630
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition:InstrTypes.h:1112
llvm::DWARFExpression::Operation
This class represents an Operation in the Expression.
Definition:DWARFExpression.h:32
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition:DenseMap.h:156
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition:DenseMap.h:321
llvm::DenseMapBase::end
iterator end()
Definition:DenseMap.h:84
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition:DenseMap.h:211
llvm::DenseMap
Definition:DenseMap.h:727
llvm::DenseSet
Implements a dense probed hash-table based set.
Definition:DenseSet.h:278
llvm::DerivedUser
Extension point for the Value hierarchy.
Definition:DerivedUser.h:27
llvm::DoNothingMemorySSAWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *, BatchAAResults &) override
Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...
Definition:MemorySSA.cpp:2615
llvm::DomTreeNodeBase< BasicBlock >
llvm::DomTreeNodeBase::begin
iterator begin()
Definition:GenericDomTree.h:76
llvm::DomTreeNodeBase::getBlock
NodeT * getBlock() const
Definition:GenericDomTree.h:89
llvm::DomTreeNodeBase< BasicBlock >::const_iterator
typename SmallVector< DomTreeNodeBase *, 4 >::const_iterator const_iterator
Definition:GenericDomTree.h:74
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition:Dominators.h:279
llvm::DominatorTreeBase::getRootNode
DomTreeNodeBase< NodeT > * getRootNode()
getRootNode - This returns the entry node for the CFG of the function.
Definition:GenericDomTree.h:421
llvm::DominatorTreeBase::getNode
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Definition:GenericDomTree.h:401
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition:Dominators.h:317
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition:Dominators.h:162
llvm::DominatorTree::isReachableFromEntry
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition:Dominators.cpp:321
llvm::DominatorTree::dominates
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition:Dominators.cpp:122
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition:Pass.h:310
llvm::Function
Definition:Function.h:63
llvm::Function::getEntryBlock
const BasicBlock & getEntryBlock() const
Definition:Function.h:809
llvm::Function::begin
iterator begin()
Definition:Function.h:853
llvm::Function::size
size_t size() const
Definition:Function.h:858
llvm::Function::end
iterator end()
Definition:Function.h:855
llvm::IDFCalculator
Definition:IteratedDominanceFrontier.h:39
llvm::Init
Definition:Record.h:285
llvm::Instruction
Definition:Instruction.h:68
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition:IntrinsicInst.h:48
llvm::LoadInst
An instruction for reading from memory.
Definition:Instructions.h:176
llvm::LoadInst::isVolatile
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition:Instructions.h:205
llvm::LoadInst::getOrdering
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition:Instructions.h:220
llvm::LoopBase::getExitBlocks
void getExitBlocks(SmallVectorImpl< BlockT * > &ExitBlocks) const
Return all of the successor blocks of this loop.
Definition:GenericLoopInfoImpl.h:64
llvm::LoopBase::getHeader
BlockT * getHeader() const
Definition:GenericLoopInfo.h:90
llvm::LoopBase::blocks
iterator_range< block_iterator > blocks() const
Definition:GenericLoopInfo.h:180
llvm::LoopBase::getLoopPreheader
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Definition:GenericLoopInfoImpl.h:210
llvm::Loop
Represents a single loop in the control flow graph.
Definition:LoopInfo.h:39
llvm::MachineBasicBlock::begin
iterator begin()
Definition:MachineBasicBlock.h:355
llvm::MachineBasicBlock::end
iterator end()
Definition:MachineBasicBlock.h:357
llvm::MachineBasicBlock::size
unsigned size() const
Definition:MachineBasicBlock.h:325
llvm::MemoryAccess
Definition:MemorySSA.h:142
llvm::MemoryAccess::dump
void dump() const
Definition:MemorySSA.cpp:2272
llvm::MemoryAccess::getBlock
BasicBlock * getBlock() const
Definition:MemorySSA.h:161
llvm::MemoryAccess::print
void print(raw_ostream &OS) const
Definition:MemorySSA.cpp:2211
llvm::MemoryAccess::getID
unsigned getID() const
Used for debugging and tracking things about MemoryAccesses.
Definition:MemorySSA.h:662
llvm::MemoryAccess::setBlock
void setBlock(BasicBlock *BB)
Used by MemorySSA to change the block of a MemoryAccess when it is moved.
Definition:MemorySSA.h:214
llvm::MemoryDef
Represents a read-write access to memory, whether it is a must-alias, or a may-alias.
Definition:MemorySSA.h:370
llvm::MemoryDef::print
void print(raw_ostream &OS) const
Definition:MemorySSA.cpp:2220
llvm::MemoryDef::resetOptimized
void resetOptimized()
Definition:MemorySSA.h:404
llvm::MemoryLocation
Representation for a specific memory location.
Definition:MemoryLocation.h:227
llvm::MemoryLocation::get
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
Definition:MemoryLocation.cpp:35
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition:MemoryLocation.h:235
llvm::MemoryPhi
Represents phi nodes for memory accesses.
Definition:MemorySSA.h:478
llvm::MemoryPhi::print
void print(raw_ostream &OS) const
Definition:MemorySSA.cpp:2240
llvm::MemorySSAAnalysis
An analysis that produces MemorySSA for a function.
Definition:MemorySSA.h:928
llvm::MemorySSAAnalysis::run
Result run(Function &F, FunctionAnalysisManager &AM)
Definition:MemorySSA.cpp:2366
llvm::MemorySSAPrinterPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition:MemorySSA.cpp:2382
llvm::MemorySSAUtil::defClobbersUseOrDef
static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, AliasAnalysis &AA)
Definition:MemorySSA.cpp:340
llvm::MemorySSAWalkerPrinterPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition:MemorySSA.cpp:2398
llvm::MemorySSAWalker
This is the generic walker interface for walkers of MemorySSA.
Definition:MemorySSA.h:1016
llvm::MemorySSAWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(const Instruction *I, BatchAAResults &AA)
Given a memory Mod/Ref/ModRef'ing instruction, calling this will give you the nearest dominating Memo...
Definition:MemorySSA.h:1045
llvm::MemorySSAWalker::MemorySSAWalker
MemorySSAWalker(MemorySSA *)
Definition:MemorySSA.cpp:2445
llvm::MemorySSAWalker::invalidateInfo
virtual void invalidateInfo(MemoryAccess *)
Given a memory access, invalidate anything this walker knows about that access.
Definition:MemorySSA.h:1093
llvm::MemorySSAWrapperPass
Legacy analysis pass which computes MemorySSA.
Definition:MemorySSA.h:985
llvm::MemorySSAWrapperPass::verifyAnalysis
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by a analysis pass to check state of analysis infor...
Definition:MemorySSA.cpp:2436
llvm::MemorySSAWrapperPass::releaseMemory
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
Definition:MemorySSA.cpp:2421
llvm::MemorySSAWrapperPass::runOnFunction
bool runOnFunction(Function &) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
Definition:MemorySSA.cpp:2429
llvm::MemorySSAWrapperPass::ID
static char ID
Definition:MemorySSA.h:989
llvm::MemorySSAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
Definition:MemorySSA.cpp:2423
llvm::MemorySSAWrapperPass::print
void print(raw_ostream &OS, const Module *M=nullptr) const override
print - Print out the internal state of the pass.
Definition:MemorySSA.cpp:2441
llvm::MemorySSAWrapperPass::MemorySSAWrapperPass
MemorySSAWrapperPass()
Definition:MemorySSA.cpp:2417
llvm::MemorySSA::CachingWalker
A MemorySSAWalker that does AA walks to disambiguate accesses.
Definition:MemorySSA.cpp:1012
llvm::MemorySSA::CachingWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override
Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...
Definition:MemorySSA.cpp:1037
llvm::MemorySSA::CachingWalker::getClobberingMemoryAccessWithoutInvariantGroup
MemoryAccess * getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)
Definition:MemorySSA.cpp:1032
llvm::MemorySSA::CachingWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)
Definition:MemorySSA.cpp:1026
llvm::MemorySSA::CachingWalker::invalidateInfo
void invalidateInfo(MemoryAccess *MA) override
Given a memory access, invalidate anything this walker knows about that access.
Definition:MemorySSA.cpp:1049
llvm::MemorySSA::CachingWalker::~CachingWalker
~CachingWalker() override=default
llvm::MemorySSA::CachingWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)
Definition:MemorySSA.cpp:1022
llvm::MemorySSA::CachingWalker::CachingWalker
CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
Definition:MemorySSA.cpp:1016
llvm::MemorySSA::CachingWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override
Given a potentially clobbering memory access and a new location, calling this will give you the neare...
Definition:MemorySSA.cpp:1042
llvm::MemorySSA::ClobberWalkerBase
Definition:MemorySSA.cpp:988
llvm::MemorySSA::ClobberWalkerBase::ClobberWalkerBase
ClobberWalkerBase(MemorySSA *M, DominatorTree *D)
Definition:MemorySSA.cpp:993
llvm::MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase
MemoryAccess * getClobberingMemoryAccessBase(MemoryAccess *, const MemoryLocation &, BatchAAResults &, unsigned &)
Walk the use-def chains starting at StartingAccess and find the MemoryAccess that actually clobbers L...
Definition:MemorySSA.cpp:2451
llvm::MemorySSA::OptimizeUses
This class is a batch walker of all MemoryUse's in the program, and points their defining access at t...
Definition:MemorySSA.cpp:1304
llvm::MemorySSA::OptimizeUses::optimizeUses
void optimizeUses()
Optimize uses to point to their actual clobbering definitions.
Definition:MemorySSA.cpp:1492
llvm::MemorySSA::OptimizeUses::OptimizeUses
OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA, DominatorTree *DT)
Definition:MemorySSA.cpp:1306
llvm::MemorySSA::SkipSelfWalker
Definition:MemorySSA.cpp:1055
llvm::MemorySSA::SkipSelfWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA) override
Given a potentially clobbering memory access and a new location, calling this will give you the neare...
Definition:MemorySSA.cpp:1080
llvm::MemorySSA::SkipSelfWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, const MemoryLocation &Loc, BatchAAResults &BAA, unsigned &UWL)
Definition:MemorySSA.cpp:1069
llvm::MemorySSA::SkipSelfWalker::~SkipSelfWalker
~SkipSelfWalker() override=default
llvm::MemorySSA::SkipSelfWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL)
Definition:MemorySSA.cpp:1065
llvm::MemorySSA::SkipSelfWalker::SkipSelfWalker
SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
Definition:MemorySSA.cpp:1059
llvm::MemorySSA::SkipSelfWalker::getClobberingMemoryAccess
MemoryAccess * getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA) override
Does the same thing as getClobberingMemoryAccess(const Instruction *I), but takes a MemoryAccess inst...
Definition:MemorySSA.cpp:1075
llvm::MemorySSA::SkipSelfWalker::invalidateInfo
void invalidateInfo(MemoryAccess *MA) override
Given a memory access, invalidate anything this walker knows about that access.
Definition:MemorySSA.cpp:1087
llvm::MemorySSA
Encapsulates MemorySSA, including all data associated with memory accesses.
Definition:MemorySSA.h:701
llvm::MemorySSA::dump
void dump() const
Definition:MemorySSA.cpp:1902
llvm::MemorySSA::moveTo
void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where)
Definition:MemorySSA.cpp:1694
llvm::MemorySSA::DefsList
simple_ilist< MemoryAccess, ilist_tag< MSSAHelpers::DefsOnlyTag > > DefsList
Definition:MemorySSA.h:754
llvm::MemorySSA::getBlockAccesses
const AccessList * getBlockAccesses(const BasicBlock *BB) const
Return the list of MemoryAccess's for a given basic block.
Definition:MemorySSA.h:759
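For illustration, a sketch that scans a block's access list; getBlockAccesses returns null for blocks that touch no memory (hypothetical helper countDefs):
  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;
  static unsigned countDefs(const MemorySSA &MSSA, const BasicBlock *BB) {
    unsigned NumDefs = 0;
    if (const MemorySSA::AccessList *AL = MSSA.getBlockAccesses(BB))
      for (const MemoryAccess &MA : *AL)
        NumDefs += isa<MemoryDef>(MA); // MemoryUse/MemoryPhi contribute 0
    return NumDefs;
  }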
llvm::MemorySSA::renamePass
void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal, SmallPtrSetImpl< BasicBlock * > &Visited)
Definition:MemorySSA.h:831
llvm::MemorySSA::getSkipSelfWalker
MemorySSAWalker * getSkipSelfWalker()
Definition:MemorySSA.cpp:1603
llvm::MemorySSA::MemorySSA
MemorySSA(Function &, AliasAnalysis *, DominatorTree *)
Definition:MemorySSA.cpp:1233
llvm::MemorySSA::dominates
bool dominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in potentially different blocks, determine whether MemoryAccess A dominates...
Definition:MemorySSA.cpp:2173
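As a hedged sketch, the def-dominates-uses invariant can be spot-checked with this predicate (plain uses only; the real verifier judges MemoryPhi operands by their incoming edges):
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;
  static bool dominatesAllUses(const MemorySSA &MSSA, MemoryAccess *MA) {
    // Every user of a MemoryAccess is itself a MemoryAccess.
    return all_of(MA->users(), [&](const User *U) {
      return MSSA.dominates(MA, cast<MemoryAccess>(U));
    });
  }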
llvm::MemorySSA::verifyOrderingDominationAndDefUses
void verifyOrderingDominationAndDefUses(IterT Blocks, VerificationLevel=VerificationLevel::Fast) const
Verify ordering: the order and existence of MemoryAccesses matches the order and existence of memory ...
Definition:MemorySSA.cpp:2021
llvm::MemorySSA::getWritableBlockAccesses
AccessList * getWritableBlockAccesses(const BasicBlock *BB) const
Definition:MemorySSA.h:812
llvm::MemorySSA::verifyMemorySSA
void verifyMemorySSA(VerificationLevel=VerificationLevel::Fast) const
Verify that MemorySSA is self-consistent (i.e. definitions dominate all uses, uses appear in the right ...
Definition:MemorySSA.cpp:1905
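In-tree updaters typically guard this self-check behind the shared flag:
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA(); // VerificationLevel::Fast unless Full is requested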
llvm::MemorySSA::VerificationLevel
VerificationLevel
Definition:MemorySSA.h:783
llvm::MemorySSA::VerificationLevel::Full
@ Full
llvm::MemorySSA::insertIntoListsForBlock
void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *, InsertionPlace)
Definition:MemorySSA.cpp:1618
llvm::MemorySSA::getWalker
MemorySSAWalker * getWalker()
Definition:MemorySSA.cpp:1590
llvm::MemorySSA::InsertionPlace
InsertionPlace
Used in various insertion functions to specify whether we are talking about the beginning or end of a...
Definition:MemorySSA.h:790
llvm::MemorySSA::Beginning
@ Beginning
Definition:MemorySSA.h:790
llvm::MemorySSA::insertIntoListsBefore
void insertIntoListsBefore(MemoryAccess *, const BasicBlock *, AccessList::iterator)
Definition:MemorySSA.cpp:1650
llvm::MemorySSA::createDefinedAccess
MemoryUseOrDef * createDefinedAccess(Instruction *, MemoryAccess *, const MemoryUseOrDef *Template=nullptr, bool CreationMustSucceed=true)
Definition:MemorySSA.cpp:1725
llvm::MemorySSA::getWritableBlockDefs
DefsList * getWritableBlockDefs(const BasicBlock *BB) const
Definition:MemorySSA.h:818
llvm::MemorySSA::getDomTree
DominatorTree & getDomTree() const
Definition:MemorySSA.h:727
llvm::MemorySSA::getMemoryAccess
MemoryUseOrDef * getMemoryAccess(const Instruction *I) const
Given a memory Mod/Ref'ing instruction, get the MemorySSA access associated with it.
Definition:MemorySSA.h:719
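A small sketch connecting an IR instruction to its MemorySSA operand (hypothetical helper definingAccessFor):
  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;
  // Returns the access feeding I's use/def, or null if I doesn't touch memory.
  static MemoryAccess *definingAccessFor(const MemorySSA &MSSA,
                                         const Instruction *I) {
    if (MemoryUseOrDef *MUD = MSSA.getMemoryAccess(I))
      return MUD->getDefiningAccess();
    return nullptr;
  }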
llvm::MemorySSA::getLiveOnEntryDef
MemoryAccess * getLiveOnEntryDef() const
Definition:MemorySSA.h:743
llvm::MemorySSA::removeFromLookups
void removeFromLookups(MemoryAccess *)
Properly remove MA from all of MemorySSA's lookup tables.
Definition:MemorySSA.cpp:1839
llvm::MemorySSA::ensureOptimizedUses
void ensureOptimizedUses()
By default, uses are not optimized during MemorySSA construction.
Definition:MemorySSA.cpp:2200
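Because construction leaves uses unoptimized, code that relies on precise MemoryUse defining accesses first does something like this (a sketch; MSSA and a load LI assumed in scope):
  MSSA.ensureOptimizedUses();                       // optimizes all uses once
  MemoryUseOrDef *MUD = MSSA.getMemoryAccess(LI);
  MemoryAccess *Clobber = MUD->getDefiningAccess(); // for a MemoryUse, its actual clobber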
llvm::MemorySSA::verifyDominationNumbers
void verifyDominationNumbers(IterT Blocks) const
Verify that all of the blocks we believe to have valid domination numbers actually have valid dominat...
Definition:MemorySSA.cpp:1979
llvm::MemorySSA::verifyPrevDefInPhis
void verifyPrevDefInPhis(IterT Blocks) const
Definition:MemorySSA.cpp:1942
llvm::MemorySSA::getBlockDefs
const DefsList * getBlockDefs(const BasicBlock *BB) const
Return the list of MemoryDef's and MemoryPhi's for a given basic block.
Definition:MemorySSA.h:767
llvm::MemorySSA::locallyDominates
bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const
Given two memory accesses in the same basic block, determine whether MemoryAccess A dominates MemoryA...
Definition:MemorySSA.cpp:2142
llvm::MemorySSA::removeFromLists
void removeFromLists(MemoryAccess *, bool ShouldDelete=true)
Properly remove MA from all of MemorySSA's lists.
Definition:MemorySSA.cpp:1866
llvm::MemorySSA::isLiveOnEntryDef
bool isLiveOnEntryDef(const MemoryAccess *MA) const
Return true if MA represents the live on entry value.
Definition:MemorySSA.h:739
llvm::MemorySSA::~MemorySSA
~MemorySSA()
Definition:MemorySSA.cpp:1272
llvm::MemorySSA::AccessList
iplist< MemoryAccess, ilist_tag< MSSAHelpers::AllAccessTag > > AccessList
Definition:MemorySSA.h:752
llvm::MemorySSA::print
void print(raw_ostream &) const
Definition:MemorySSA.cpp:1893
llvm::MemoryUseOrDef
Class that has the common methods + fields of memory uses/defs.
Definition:MemorySSA.h:249
llvm::MemoryUseOrDef::getDefiningAccess
MemoryAccess * getDefiningAccess() const
Get the access that produces the memory state used by this Use.
Definition:MemorySSA.h:259
llvm::MemoryUseOrDef::setDefiningAccess
void setDefiningAccess(MemoryAccess *DMA, bool Optimized=false)
Definition:MemorySSA.h:292
llvm::MemoryUseOrDef::setOptimized
void setOptimized(MemoryAccess *)
Sets the optimized use for a MemoryDef.
Definition:MemorySSA.h:682
llvm::MemoryUseOrDef::getMemoryInst
Instruction * getMemoryInst() const
Get the instruction that this MemoryUse represents.
Definition:MemorySSA.h:256
llvm::MemoryUse
Represents read-only accesses to memory.
Definition:MemorySSA.h:309
llvm::MemoryUse::print
void print(raw_ostream &OS) const
Definition:MemorySSA.cpp:2262
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition:Module.h:65
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition:PassRegistry.cpp:24
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition:Analysis.h:111
llvm::PreservedAnalyses::all
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition:Analysis.h:117
llvm::PreservedAnalyses::getChecker
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition:Analysis.h:264
llvm::SmallPtrSetImplBase::empty
bool empty() const
Definition:SmallPtrSet.h:93
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition:SmallPtrSet.h:363
llvm::SmallPtrSetImpl::erase
bool erase(PtrType Ptr)
Remove pointer from the set.
Definition:SmallPtrSet.h:401
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition:SmallPtrSet.h:452
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition:SmallPtrSet.h:384
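The bool in the returned pair makes visit-once traversals cheap; a sketch (hypothetical helper countReachable):
  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/CFG.h"
  #include "llvm/IR/Function.h"
  using namespace llvm;
  static unsigned countReachable(const Function &F) {
    SmallPtrSet<const BasicBlock *, 16> Visited;
    SmallVector<const BasicBlock *, 16> Worklist{&F.getEntryBlock()};
    unsigned N = 0;
    while (!Worklist.empty()) {
      const BasicBlock *BB = Worklist.pop_back_val();
      if (!Visited.insert(BB).second)
        continue; // second == false: BB was already in the set
      ++N;
      for (const BasicBlock *Succ : successors(BB))
        Worklist.push_back(Succ);
    }
    return N;
  }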
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or fewer elements.
Definition:SmallPtrSet.h:519
llvm::SmallVectorBase::empty
bool empty() const
Definition:SmallVector.h:81
llvm::SmallVectorBase::size
size_t size() const
Definition:SmallVector.h:78
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition:SmallVector.h:573
llvm::SmallVectorImpl::pop_back_val
T pop_back_val()
Definition:SmallVector.h:673
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition:SmallVector.h:937
llvm::SmallVectorImpl::clear
void clear()
Definition:SmallVector.h:610
llvm::SmallVectorTemplateBase::pop_back
void pop_back()
Definition:SmallVector.h:425
llvm::SmallVectorTemplateBase::push_back
void push_back(const T &Elt)
Definition:SmallVector.h:413
llvm::SmallVectorTemplateCommon::end
iterator end()
Definition:SmallVector.h:269
llvm::SmallVectorTemplateCommon::front
reference front()
Definition:SmallVector.h:299
llvm::SmallVectorTemplateCommon::begin
iterator begin()
Definition:SmallVector.h:267
llvm::SmallVectorTemplateCommon::back
reference back()
Definition:SmallVector.h:308
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition:SmallVector.h:1196
llvm::StringRef
StringRef - Represent a constant reference to a string, i.e.
Definition:StringRef.h:51
llvm::StringRef::str
std::string str() const
str - Get the contents as an std::string.
Definition:StringRef.h:229
llvm::StringRef::count
size_t count(char C) const
Return the number of occurrences of C in the string.
Definition:StringRef.h:451
llvm::SuccIterator
Definition:CFG.h:141
llvm::Target
Target - Wrapper for Target specific information.
Definition:TargetRegistry.h:144
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition:Use.h:43
llvm::Use::getUser
User * getUser() const
Returns the User that contains this Use.
Definition:Use.h:72
llvm::User
Definition:User.h:44
llvm::User::dropAllReferences
void dropAllReferences()
Drop all references to operands.
Definition:User.h:345
llvm::Value
LLVM Value Representation.
Definition:Value.h:74
llvm::Value::users
iterator_range< user_iterator > users()
Definition:Value.h:421
llvm::Value::printAsOperand
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
Definition:AsmWriter.cpp:5144
llvm::Value::stripPointerCasts
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
Definition:Value.cpp:694
llvm::Value::use_empty
bool use_empty() const
Definition:Value.h:344
llvm::Value::uses
iterator_range< use_iterator > uses()
Definition:Value.h:376
llvm::Value::hasName
bool hasName() const
Definition:Value.h:261
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition:Value.cpp:309
llvm::cl::opt
Definition:CommandLine.h:1423
llvm::detail::DenseSetImpl::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition:DenseSet.h:213
llvm::detail::DenseSetImpl::clear
void clear()
Definition:DenseSet.h:92
llvm::detail::DenseSetImpl::empty
bool empty() const
Definition:DenseSet.h:80
llvm::formatted_raw_ostream
formatted_raw_ostream - A raw_ostream that wraps another one and keeps track of line and column posit...
Definition:FormattedStream.h:30
llvm::hash_code
An opaque object representing a hash code.
Definition:Hashing.h:75
llvm::iplist_impl< simple_ilist< T, Options... >, ilist_traits< T > >::iterator
base_list_type::iterator iterator
Definition:ilist.h:121
llvm::iplist
An intrusive list with ownership and callbacks specified/controlled by ilist_traits,...
Definition:ilist.h:328
llvm::iterator_facade_base
CRTP base class which implements the entire standard iterator facade in terms of a minimal subset of ...
Definition:iterator.h:80
llvm::iterator_range
A range adaptor for a pair of iterators.
Definition:iterator_range.h:42
llvm::pointer_iterator
Definition:iterator.h:348
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition:raw_ostream.h:52
llvm::raw_string_ostream
A raw_ostream that writes to an std::string.
Definition:raw_ostream.h:661
llvm::simple_ilist
A simple intrusive list implementation.
Definition:simple_ilist.h:81
llvm::simple_ilist::rbegin
reverse_iterator rbegin()
Definition:simple_ilist.h:129
llvm::sys::Memory
This class provides various memory handling functions that manipulate MemoryBlock instances.
Definition:Memory.h:53
unsigned
iterator.h
iterator_range.h
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
ErrorHandling.h
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition:ErrorHandling.h:143
false
Definition:StackSlotColoring.cpp:193
llvm::ARM::ProfileKind::M
@ M
llvm::CallingConv::C
@ C
The default llvm calling convention, compatible with C.
Definition:CallingConv.h:34
llvm::CallingConv::ID
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition:CallingConv.h:24
llvm::M68kBeads::Term
@ Term
Definition:M68kBaseInfo.h:116
llvm::M68k::MemAddrModeKind::U
@ U
llvm::MCID::Call
@ Call
Definition:MCInstrDesc.h:156
llvm::RISCVFenceField::W
@ W
Definition:RISCVBaseInfo.h:374
llvm::RISCVFenceField::R
@ R
Definition:RISCVBaseInfo.h:373
llvm::RISCVFenceField::O
@ O
Definition:RISCVBaseInfo.h:372
llvm::cl::Hidden
@ Hidden
Definition:CommandLine.h:137
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition:CommandLine.h:443
llvm::cl::location
LocationClass< Ty > location(Ty &L)
Definition:CommandLine.h:463
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm::rdf::Phi
NodeAddr< PhiNode * > Phi
Definition:RDFGraph.h:390
llvm::rdf::Def
NodeAddr< DefNode * > Def
Definition:RDFGraph.h:384
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition:AddressRanges.h:18
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1739
llvm::initializeMemorySSAWrapperPassPass
void initializeMemorySSAWrapperPassPass(PassRegistry &)
llvm::operator*
APInt operator*(APInt a, uint64_t RHS)
Definition:APInt.h:2204
llvm::successors
auto successors(const MachineBasicBlock *BB)
Definition:MachineBasicBlock.h:1376
llvm::getLoadStorePointerOperand
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
Definition:Instructions.h:4984
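Commonly paired with stripPointerCasts when forming locations; a sketch (hypothetical helper underlyingAddr):
  #include "llvm/IR/Instructions.h"
  using namespace llvm;
  static const Value *underlyingAddr(const Instruction *I) {
    // Null for anything that is not a load or a store.
    if (const Value *Ptr = getLoadStorePointerOperand(I))
      return Ptr->stripPointerCasts();
    return nullptr;
  }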
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition:iterator_range.h:77
llvm::maximum
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
Definition:APFloat.h:1604
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition:STLExtras.h:657
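The canonical erase-while-iterating sketch:
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/BasicBlock.h"
  using namespace llvm;
  static void dropTriviallyDeadNonTerminators(BasicBlock &BB) {
    // The iterator advances before the body runs, so erasing I is safe.
    for (Instruction &I : make_early_inc_range(BB))
      if (!I.isTerminator() && I.use_empty() && !I.mayHaveSideEffects())
        I.eraseFromParent();
  }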
llvm::pred_size
auto pred_size(const MachineBasicBlock *BB)
Definition:MachineBasicBlock.h:1381
llvm::WriteGraph
raw_ostream & WriteGraph(raw_ostream &O, const GraphType &G, bool ShortNames=false, const Twine &Title="")
Definition:GraphWriter.h:359
llvm::upward_defs_begin
upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT)
Definition:MemorySSA.h:1300
llvm::operator==
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
Definition:AddressRanges.h:153
llvm::map_range
auto map_range(ContainerTy &&C, FuncTy F)
Definition:STLExtras.h:377
llvm::def_chain
iterator_range< def_chain_iterator< T > > def_chain(T MA, MemoryAccess *UpTo=nullptr)
Definition:MemorySSA.h:1355
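A sketch walking the defining-access chain; the range ends after the first MemoryPhi, or at liveOnEntry (whose defining access is null):
  #include "llvm/Analysis/MemorySSA.h"
  using namespace llvm;
  static unsigned defChainLength(MemoryAccess *Start) {
    unsigned Len = 0;
    for (MemoryAccess *MA : def_chain(Start)) {
      (void)MA;
      ++Len;
    }
    return Len;
  }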
llvm::isModSet
bool isModSet(const ModRefInfo MRI)
Definition:ModRef.h:48
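These predicates decode the ModRefInfo lattice; e.g., a may-write test of a call against a load's location (a sketch; BatchAAResults mirrors the AAResults query interface):
  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;
  static bool callMayClobberLoad(BatchAAResults &BAA, const CallBase *Call,
                                 const LoadInst *LI) {
    return isModSet(BAA.getModRefInfo(Call, MemoryLocation::get(LI)));
  }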
llvm::find_if_not
auto find_if_not(R &&Range, UnaryPredicate P)
Definition:STLExtras.h:1771
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition:Debug.cpp:163
llvm::isAtLeastOrStrongerThan
bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other)
Definition:AtomicOrdering.h:106
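Loosely mirroring the acquire check this file applies to atomic loads (a sketch):
  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/AtomicOrdering.h"
  using namespace llvm;
  static bool loadActsAsAcquireBarrier(const LoadInst *LI) {
    // True for acquire and anything stronger; monotonic/unordered do not qualify.
    return isAtLeastOrStrongerThan(LI->getOrdering(), AtomicOrdering::Acquire);
  }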
llvm::isModOrRefSet
bool isModOrRefSet(const ModRefInfo MRI)
Definition:ModRef.h:42
llvm::isa
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition:Casting.h:548
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition:ModRef.h:27
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::IRMemLocation::First
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
llvm::VerifyMemorySSA
bool VerifyMemorySSA
Enables verification of MemorySSA.
Definition:MemorySSA.cpp:84
llvm::MemoryAccessPair
std::pair< MemoryAccess *, MemoryLocation > MemoryAccessPair
Definition:MemorySSA.h:1116
llvm::upward_defs_end
upward_defs_iterator upward_defs_end()
Definition:MemorySSA.h:1304
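Paired with upward_defs_begin above; a sketch visiting a phi's incoming (access, location) pairs, with phi translation applied to the location where possible:
  #include "llvm/Analysis/MemorySSA.h"
  #include "llvm/IR/Dominators.h"
  using namespace llvm;
  static void visitIncoming(MemoryPhi *Phi, const MemoryLocation &Loc,
                            DominatorTree &DT) {
    for (const MemoryAccessPair &P :
         make_range(upward_defs_begin({Phi, Loc}, DT), upward_defs_end())) {
      // P.first: incoming MemoryAccess; P.second: location along that edge.
      (void)P;
    }
  }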
llvm::PseudoProbeReservedId::Last
@ Last
llvm::find_if
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1766
llvm::predecessors
auto predecessors(const MachineBasicBlock *BB)
Definition:MachineBasicBlock.h:1377
llvm::is_contained
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition:STLExtras.h:1903
llvm::depth_first
iterator_range< df_iterator< T > > depth_first(const T &G)
Definition:DepthFirstIterator.h:233
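E.g., DFS preorder over a function's CFG via GraphTraits<Function *> (a sketch):
  #include "llvm/ADT/DepthFirstIterator.h"
  #include "llvm/IR/CFG.h"
  using namespace llvm;
  static unsigned numBlocksDFS(Function &F) {
    unsigned N = 0;
    for (BasicBlock *BB : depth_first(&F)) {
      (void)BB; // visited in depth-first preorder from the entry block
      ++N;
    }
    return N;
  }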
llvm::hash_combine
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
Definition:Hashing.h:590
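E.g., folding several hashable fields into one value, as the DenseMapInfo specialization above does (a sketch):
  #include "llvm/ADT/Hashing.h"
  using namespace llvm;
  static unsigned combinedHash(const void *Ptr, uint64_t Size, bool IsCall) {
    return static_cast<unsigned>(hash_combine(Ptr, Size, IsCall));
  }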
llvm::isRefSet
bool isRefSet(const ModRefInfo MRI)
Definition:ModRef.h:51
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition:BitVector.h:860
raw_ostream.h
MAP
#define MAP(n)
N
#define N
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition:Analysis.h:28
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getEdgeAttributes
std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I, DOTFuncMSSAInfo *CFGInfo)
Display the raw branch weights from PGO.
Definition:MemorySSA.cpp:2349
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getNodeAttributes
std::string getNodeAttributes(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2354
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getEdgeSourceLabel
static std::string getEdgeSourceLabel(const BasicBlock *Node, const_succ_iterator I)
Definition:MemorySSA.cpp:2343
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getNodeLabel
std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2327
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::getGraphName
static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2322
llvm::DOTGraphTraits< DOTFuncMSSAInfo * >::DOTGraphTraits
DOTGraphTraits(bool IsSimple=false)
Definition:MemorySSA.cpp:2320
llvm::DOTGraphTraits
DOTGraphTraits - Template class that can be specialized to customize how graphs are converted to 'dot...
Definition:DOTGraphTraits.h:166
llvm::DWARFExpression::Operation::Description
Description of the encoding of one expression Op.
Definition:DWARFExpression.h:66
llvm::DefaultDOTGraphTraits
DefaultDOTGraphTraits - This class provides the default implementations of all of the DOTGraphTraits ...
Definition:DOTGraphTraits.h:28
llvm::DenseMapInfo< MemoryLocOrCall >::isEqual
static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS)
Definition:MemorySSA.cpp:243
llvm::DenseMapInfo< MemoryLocOrCall >::getHashValue
static unsigned getHashValue(const MemoryLocOrCall &MLOC)
Definition:MemorySSA.cpp:228
llvm::DenseMapInfo< MemoryLocOrCall >::getEmptyKey
static MemoryLocOrCall getEmptyKey()
Definition:MemorySSA.cpp:220
llvm::DenseMapInfo< MemoryLocOrCall >::getTombstoneKey
static MemoryLocOrCall getTombstoneKey()
Definition:MemorySSA.cpp:224
llvm::DenseMapInfo
An information struct used to provide DenseMap with the various necessary components for a given valu...
Definition:DenseMapInfo.h:52
llvm::GraphTraits< DOTFuncMSSAInfo * >::size
static size_t size(DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2312
llvm::GraphTraits< DOTFuncMSSAInfo * >::getEntryNode
static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2297
llvm::GraphTraits< DOTFuncMSSAInfo * >::nodes_begin
static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2304
llvm::GraphTraits< DOTFuncMSSAInfo * >::nodes_end
static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo)
Definition:MemorySSA.cpp:2308
llvm::GraphTraits
Definition:GraphTraits.h:38
llvm::MemorySSAAnalysis::Result
Definition:MemorySSA.h:937
llvm::MemorySSAAnalysis::Result::invalidate
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Definition:LoopAnalysisManager.cpp:29
llvm::MemorySSAVerifierPass::run
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Definition:MemorySSA.cpp:2408
llvm::cl::desc
Definition:CommandLine.h:409
llvm::cl::value_desc
Definition:CommandLine.h:418

Generated on Thu Jul 17 2025 11:00:27 for LLVM by doxygen 1.9.6