Movatterモバイル変換


[0]ホーム

URL:


LLVM 20.0.0git
BasicAliasAnalysis.cpp
Go to the documentation of this file.
1//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the primary stateless implementation of the
10// Alias Analysis interface that implements identities (two different
11// globals cannot alias, etc), but does no stateful analysis.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Analysis/BasicAliasAnalysis.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ScopeExit.h"
18#include "llvm/ADT/SmallPtrSet.h"
19#include "llvm/ADT/SmallVector.h"
20#include "llvm/ADT/Statistic.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/CFG.h"
24#include "llvm/Analysis/CaptureTracking.h"
25#include "llvm/Analysis/MemoryBuiltins.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/TargetLibraryInfo.h"
28#include "llvm/Analysis/ValueTracking.h"
29#include "llvm/IR/Argument.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/ConstantRange.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalAlias.h"
40#include "llvm/IR/GlobalVariable.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/Operator.h"
47#include "llvm/IR/PatternMatch.h"
48#include "llvm/IR/Type.h"
49#include "llvm/IR/User.h"
50#include "llvm/IR/Value.h"
51#include "llvm/InitializePasses.h"
52#include "llvm/Pass.h"
53#include "llvm/Support/Casting.h"
54#include "llvm/Support/CommandLine.h"
55#include "llvm/Support/Compiler.h"
56#include "llvm/Support/KnownBits.h"
57#include "llvm/Support/SaveAndRestore.h"
58#include <cassert>
59#include <cstdint>
60#include <cstdlib>
61#include <optional>
62#include <utility>
63
64#define DEBUG_TYPE "basicaa"
65
66using namespacellvm;
67
68/// Enable analysis of recursive PHI nodes.
69staticcl::opt<bool>EnableRecPhiAnalysis("basic-aa-recphi",cl::Hidden,
70cl::init(true));
71
72staticcl::opt<bool>EnableSeparateStorageAnalysis("basic-aa-separate-storage",
73cl::Hidden,cl::init(true));
74
75/// SearchLimitReached / SearchTimes shows how often the limit of
76/// to decompose GEPs is reached. It will affect the precision
77/// of basic alias analysis.
78STATISTIC(SearchLimitReached,"Number of times the limit to "
79"decompose GEPs is reached");
80STATISTIC(SearchTimes,"Number of times a GEP is decomposed");
81
82// The max limit of the search depth in DecomposeGEPExpression() and
83// getUnderlyingObject().
84staticconstunsignedMaxLookupSearchDepth = 6;
85
86boolBasicAAResult::invalidate(Function &Fn,constPreservedAnalyses &PA,
87FunctionAnalysisManager::Invalidator &Inv) {
88// We don't care if this analysis itself is preserved, it has no state. But
89// we need to check that the analyses it depends on have been. Note that we
90// may be created without handles to some analyses and in that case don't
91// depend on them.
92if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
93 (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
94returntrue;
95
96// Otherwise this analysis result remains valid.
97returnfalse;
98}
99
100//===----------------------------------------------------------------------===//
101// Useful predicates
102//===----------------------------------------------------------------------===//
103
104/// Returns the size of the object specified by V or UnknownSize if unknown.
105static std::optional<TypeSize>getObjectSize(constValue *V,
106constDataLayout &DL,
107constTargetLibraryInfo &TLI,
108bool NullIsValidLoc,
109bool RoundToAlign =false) {
110uint64_tSize;
111ObjectSizeOpts Opts;
112 Opts.RoundToAlign = RoundToAlign;
113 Opts.NullIsUnknownSize = NullIsValidLoc;
114if (getObjectSize(V,Size,DL, &TLI, Opts))
115returnTypeSize::getFixed(Size);
116return std::nullopt;
117}
118
119/// Returns true if we can prove that the object specified by V is smaller than
120/// Size. Bails out early unless the root object is passed as the first
121/// parameter.
122staticboolisObjectSmallerThan(constValue *V,TypeSizeSize,
123constDataLayout &DL,
124constTargetLibraryInfo &TLI,
125bool NullIsValidLoc) {
126// Note that the meanings of the "object" are slightly different in the
127// following contexts:
128// c1: llvm::getObjectSize()
129// c2: llvm.objectsize() intrinsic
130// c3: isObjectSmallerThan()
131// c1 and c2 share the same meaning; however, the meaning of "object" in c3
132// refers to the "entire object".
133//
134// Consider this example:
135// char *p = (char*)malloc(100)
136// char *q = p+80;
137//
138// In the context of c1 and c2, the "object" pointed by q refers to the
139// stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
140//
141// In the context of c3, the "object" refers to the chunk of memory being
142// allocated. So, the "object" has 100 bytes, and q points to the middle the
143// "object". However, unless p, the root object, is passed as the first
144// parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
145// bail out early.
146if (!isIdentifiedObject(V))
147returnfalse;
148
149// This function needs to use the aligned object size because we allow
150// reads a bit past the end given sufficient alignment.
151 std::optional<TypeSize> ObjectSize =getObjectSize(V,DL, TLI, NullIsValidLoc,
152/*RoundToAlign*/true);
153
154return ObjectSize &&TypeSize::isKnownLT(*ObjectSize,Size);
155}
156
157/// Return the minimal extent from \p V to the end of the underlying object,
158/// assuming the result is used in an aliasing query. E.g., we do use the query
159/// location size and the fact that null pointers cannot alias here.
160staticTypeSizegetMinimalExtentFrom(constValue &V,
161constLocationSize &LocSize,
162constDataLayout &DL,
163bool NullIsValidLoc) {
164// If we have dereferenceability information we know a lower bound for the
165// extent as accesses for a lower offset would be valid. We need to exclude
166// the "or null" part if null is a valid pointer. We can ignore frees, as an
167// access after free would be undefined behavior.
168bool CanBeNull, CanBeFreed;
169uint64_t DerefBytes =
170 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
171 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
172// If queried with a precise location size, we assume that location size to be
173// accessed, thus valid.
174if (LocSize.isPrecise())
175 DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
176returnTypeSize::getFixed(DerefBytes);
177}
178
179/// Returns true if we can prove that the object specified by V has size Size.
180staticboolisObjectSize(constValue *V,TypeSizeSize,constDataLayout &DL,
181constTargetLibraryInfo &TLI,bool NullIsValidLoc) {
182 std::optional<TypeSize> ObjectSize =
183getObjectSize(V,DL, TLI, NullIsValidLoc);
184return ObjectSize && *ObjectSize ==Size;
185}
186
187/// Return true if both V1 and V2 are VScale
188staticboolareBothVScale(constValue *V1,constValue *V2) {
189returnPatternMatch::match(V1,PatternMatch::m_VScale()) &&
190PatternMatch::match(V2,PatternMatch::m_VScale());
191}
192
193//===----------------------------------------------------------------------===//
194// CaptureAnalysis implementations
195//===----------------------------------------------------------------------===//
196
197CaptureAnalysis::~CaptureAnalysis() =default;
198
199boolSimpleCaptureAnalysis::isNotCapturedBefore(constValue *Object,
200constInstruction *I,
201bool OrAt) {
202returnisNonEscapingLocalObject(Object, &IsCapturedCache);
203}
204
205staticboolisNotInCycle(constInstruction *I,constDominatorTree *DT,
206constLoopInfo *LI) {
207BasicBlock *BB =const_cast<BasicBlock *>(I->getParent());
208SmallVector<BasicBlock *> Succs(successors(BB));
209return Succs.empty() ||
210 !isPotentiallyReachableFromMany(Succs, BB,nullptr, DT, LI);
211}
212
213boolEarliestEscapeAnalysis::isNotCapturedBefore(constValue *Object,
214constInstruction *I,
215bool OrAt) {
216if (!isIdentifiedFunctionLocal(Object))
217returnfalse;
218
219auto Iter = EarliestEscapes.insert({Object,nullptr});
220if (Iter.second) {
221Instruction *EarliestCapture =FindEarliestCapture(
222 Object, *const_cast<Function *>(DT.getRoot()->getParent()),
223/*ReturnCaptures=*/false,/*StoreCaptures=*/true, DT);
224if (EarliestCapture)
225 Inst2Obj[EarliestCapture].push_back(Object);
226 Iter.first->second = EarliestCapture;
227 }
228
229// No capturing instruction.
230if (!Iter.first->second)
231returntrue;
232
233// No context instruction means any use is capturing.
234if (!I)
235returnfalse;
236
237if (I == Iter.first->second) {
238if (OrAt)
239returnfalse;
240returnisNotInCycle(I, &DT, LI);
241 }
242
243return !isPotentiallyReachable(Iter.first->second,I,nullptr, &DT, LI);
244}
245
246voidEarliestEscapeAnalysis::removeInstruction(Instruction *I) {
247auto Iter = Inst2Obj.find(I);
248if (Iter != Inst2Obj.end()) {
249for (constValue *Obj : Iter->second)
250 EarliestEscapes.erase(Obj);
251 Inst2Obj.erase(I);
252 }
253}
254
255//===----------------------------------------------------------------------===//
256// GetElementPtr Instruction Decomposition and Analysis
257//===----------------------------------------------------------------------===//
258
259namespace{
260/// Represents zext(sext(trunc(V))).
261structCastedValue {
262constValue *V;
263unsigned ZExtBits = 0;
264unsigned SExtBits = 0;
265unsigned TruncBits = 0;
266 /// Whether trunc(V) is non-negative.
267bool IsNonNegative =false;
268
269explicit CastedValue(constValue *V) : V(V) {}
270explicit CastedValue(constValue *V,unsigned ZExtBits,unsigned SExtBits,
271unsigned TruncBits,bool IsNonNegative)
272 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
273 IsNonNegative(IsNonNegative) {}
274
275unsignedgetBitWidth() const{
276returnV->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
277 SExtBits;
278 }
279
280 CastedValue withValue(constValue *NewV,bool PreserveNonNeg) const{
281return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
282 IsNonNegative && PreserveNonNeg);
283 }
284
285 /// Replace V with zext(NewV)
286 CastedValue withZExtOfValue(constValue *NewV,bool ZExtNonNegative) const{
287unsigned ExtendBy =V->getType()->getPrimitiveSizeInBits() -
288 NewV->getType()->getPrimitiveSizeInBits();
289if (ExtendBy <= TruncBits)
290// zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
291// The nneg can be preserved on the outer zext here.
292return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
293 IsNonNegative);
294
295// zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
296 ExtendBy -= TruncBits;
297// zext<nneg>(zext(NewV)) == zext(NewV)
298// zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
299// The nneg can be preserved from the inner zext here but must be dropped
300// from the outer.
301return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
302 ZExtNonNegative);
303 }
304
305 /// Replace V with sext(NewV)
306 CastedValue withSExtOfValue(constValue *NewV) const{
307unsigned ExtendBy =V->getType()->getPrimitiveSizeInBits() -
308 NewV->getType()->getPrimitiveSizeInBits();
309if (ExtendBy <= TruncBits)
310// zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
311// The nneg can be preserved on the outer zext here
312return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
313 IsNonNegative);
314
315// zext(sext(sext(NewV)))
316 ExtendBy -= TruncBits;
317// zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
318// The nneg can be preserved on the outer zext here
319return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
320 }
321
322APInt evaluateWith(APIntN) const{
323assert(N.getBitWidth() ==V->getType()->getPrimitiveSizeInBits() &&
324"Incompatible bit width");
325if (TruncBits)N =N.trunc(N.getBitWidth() - TruncBits);
326if (SExtBits)N =N.sext(N.getBitWidth() + SExtBits);
327if (ZExtBits)N =N.zext(N.getBitWidth() + ZExtBits);
328returnN;
329 }
330
331ConstantRange evaluateWith(ConstantRangeN) const{
332assert(N.getBitWidth() ==V->getType()->getPrimitiveSizeInBits() &&
333"Incompatible bit width");
334if (TruncBits)N =N.truncate(N.getBitWidth() - TruncBits);
335if (IsNonNegative && !N.isAllNonNegative())
336N =N.intersectWith(
337ConstantRange(APInt::getZero(N.getBitWidth()),
338APInt::getSignedMinValue(N.getBitWidth())));
339if (SExtBits)N =N.signExtend(N.getBitWidth() + SExtBits);
340if (ZExtBits)N =N.zeroExtend(N.getBitWidth() + ZExtBits);
341returnN;
342 }
343
344bool canDistributeOver(bool NUW,bool NSW) const{
345// zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
346// sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
347// trunc(x op y) == trunc(x) op trunc(y)
348return (!ZExtBits || NUW) && (!SExtBits || NSW);
349 }
350
351bool hasSameCastsAs(const CastedValue &Other) const{
352if (V->getType() !=Other.V->getType())
353returnfalse;
354
355if (ZExtBits ==Other.ZExtBits && SExtBits ==Other.SExtBits &&
356 TruncBits ==Other.TruncBits)
357returntrue;
358// If either CastedValue has a nneg zext then the sext/zext bits are
359// interchangable for that value.
360if (IsNonNegative ||Other.IsNonNegative)
361return (ZExtBits + SExtBits ==Other.ZExtBits +Other.SExtBits &&
362 TruncBits ==Other.TruncBits);
363returnfalse;
364 }
365};
366
367/// Represents zext(sext(trunc(V))) * Scale + Offset.
368structLinearExpression {
369 CastedValue Val;
370APInt Scale;
371APIntOffset;
372
373 /// True if all operations in this expression are NUW.
374bool IsNUW;
375 /// True if all operations in this expression are NSW.
376bool IsNSW;
377
378 LinearExpression(const CastedValue &Val,constAPInt &Scale,
379constAPInt &Offset,bool IsNUW,bool IsNSW)
380 : Val(Val), Scale(Scale),Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}
381
382 LinearExpression(const CastedValue &Val)
383 : Val(Val), IsNUW(true), IsNSW(true) {
384unsignedBitWidth = Val.getBitWidth();
385 Scale =APInt(BitWidth, 1);
386Offset =APInt(BitWidth, 0);
387 }
388
389 LinearExpression mul(constAPInt &Other,bool MulIsNUW,bool MulIsNSW) const{
390// The check for zero offset is necessary, because generally
391// (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
392bool NSW = IsNSW && (Other.isOne() || (MulIsNSW &&Offset.isZero()));
393bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
394return LinearExpression(Val, Scale *Other,Offset *Other, NUW, NSW);
395 }
396};
397}
398
399/// Analyzes the specified value as a linear expression: "A*V + B", where A and
400/// B are constant integers.
401static LinearExpressionGetLinearExpression(
402const CastedValue &Val,constDataLayout &DL,unsignedDepth,
403AssumptionCache *AC,DominatorTree *DT) {
404// Limit our recursion depth.
405if (Depth == 6)
406return Val;
407
408if (constConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
409return LinearExpression(Val,APInt(Val.getBitWidth(), 0),
410 Val.evaluateWith(Const->getValue()),true,true);
411
412if (constBinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
413if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
414APIntRHS = Val.evaluateWith(RHSC->getValue());
415// The only non-OBO case we deal with is or, and only limited to the
416// case where it is both nuw and nsw.
417bool NUW =true, NSW =true;
418if (isa<OverflowingBinaryOperator>(BOp)) {
419 NUW &= BOp->hasNoUnsignedWrap();
420 NSW &= BOp->hasNoSignedWrap();
421 }
422if (!Val.canDistributeOver(NUW, NSW))
423return Val;
424
425// While we can distribute over trunc, we cannot preserve nowrap flags
426// in that case.
427if (Val.TruncBits)
428 NUW = NSW =false;
429
430 LinearExpression E(Val);
431switch (BOp->getOpcode()) {
432default:
433// We don't understand this instruction, so we can't decompose it any
434// further.
435return Val;
436case Instruction::Or:
437// X|C == X+C if it is disjoint. Otherwise we can't analyze it.
438if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
439return Val;
440
441 [[fallthrough]];
442case Instruction::Add: {
443 E =GetLinearExpression(Val.withValue(BOp->getOperand(0),false),DL,
444Depth + 1, AC, DT);
445 E.Offset +=RHS;
446 E.IsNUW &= NUW;
447 E.IsNSW &= NSW;
448break;
449 }
450case Instruction::Sub: {
451 E =GetLinearExpression(Val.withValue(BOp->getOperand(0),false),DL,
452Depth + 1, AC, DT);
453 E.Offset -=RHS;
454 E.IsNUW =false;// sub nuw x, y is not add nuw x, -y.
455 E.IsNSW &= NSW;
456break;
457 }
458case Instruction::Mul:
459 E =GetLinearExpression(Val.withValue(BOp->getOperand(0),false),DL,
460Depth + 1, AC, DT)
461 .mul(RHS, NUW, NSW);
462break;
463case Instruction::Shl:
464// We're trying to linearize an expression of the kind:
465// shl i8 -128, 36
466// where the shift count exceeds the bitwidth of the type.
467// We can't decompose this further (the expression would return
468// a poison value).
469if (RHS.getLimitedValue() > Val.getBitWidth())
470return Val;
471
472 E =GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW),DL,
473Depth + 1, AC, DT);
474 E.Offset <<=RHS.getLimitedValue();
475 E.Scale <<=RHS.getLimitedValue();
476 E.IsNUW &= NUW;
477 E.IsNSW &= NSW;
478break;
479 }
480return E;
481 }
482 }
483
484if (constauto *ZExt = dyn_cast<ZExtInst>(Val.V))
485returnGetLinearExpression(
486 Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()),DL,
487Depth + 1, AC, DT);
488
489if (isa<SExtInst>(Val.V))
490returnGetLinearExpression(
491 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
492DL,Depth + 1, AC, DT);
493
494return Val;
495}
496
497namespace{
498// A linear transformation of a Value; this class represents
499// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
500structVariableGEPIndex {
501 CastedValue Val;
502APInt Scale;
503
504// Context instruction to use when querying information about this index.
505constInstruction *CxtI;
506
507 /// True if all operations in this expression are NSW.
508bool IsNSW;
509
510 /// True if the index should be subtracted rather than added. We don't simply
511 /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
512 /// non-wrapping, while X + INT_MIN*(-1) wraps.
513bool IsNegated;
514
515bool hasNegatedScaleOf(const VariableGEPIndex &Other) const{
516if (IsNegated ==Other.IsNegated)
517return Scale == -Other.Scale;
518return Scale ==Other.Scale;
519 }
520
521voiddump() const{
522print(dbgs());
523dbgs() <<"\n";
524 }
525voidprint(raw_ostream &OS) const{
526OS <<"(V=" << Val.V->getName()
527 <<", zextbits=" << Val.ZExtBits
528 <<", sextbits=" << Val.SExtBits
529 <<", truncbits=" << Val.TruncBits
530 <<", scale=" << Scale
531 <<", nsw=" << IsNSW
532 <<", negated=" << IsNegated <<")";
533 }
534};
535}
536
537// Represents the internal structure of a GEP, decomposed into a base pointer,
538// constant offsets, and variable scaled indices.
539structBasicAAResult::DecomposedGEP {
540// Base pointer of the GEP
541constValue *Base;
542// Total constant offset from base.
543APIntOffset;
544// Scaled variable (non-constant) indices.
545SmallVector<VariableGEPIndex, 4>VarIndices;
546// Nowrap flags common to all GEP operations involved in expression.
547GEPNoWrapFlagsNWFlags =GEPNoWrapFlags::all();
548
549voiddump() const{
550print(dbgs());
551dbgs() <<"\n";
552 }
553voidprint(raw_ostream &OS) const{
554OS <<", inbounds=" << (NWFlags.isInBounds() ?"1" :"0")
555 <<", nuw=" << (NWFlags.hasNoUnsignedWrap() ?"1" :"0")
556 <<"(DecomposedGEP Base=" <<Base->getName() <<", Offset=" <<Offset
557 <<", VarIndices=[";
558for (size_t i = 0; i <VarIndices.size(); i++) {
559if (i != 0)
560OS <<", ";
561VarIndices[i].print(OS);
562 }
563OS <<"])";
564 }
565};
566
567
568/// If V is a symbolic pointer expression, decompose it into a base pointer
569/// with a constant offset and a number of scaled symbolic offsets.
570///
571/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
572/// in the VarIndices vector) are Value*'s that are known to be scaled by the
573/// specified amount, but which may have other unrepresented high bits. As
574/// such, the gep cannot necessarily be reconstructed from its decomposed form.
575BasicAAResult::DecomposedGEP
576BasicAAResult::DecomposeGEPExpression(constValue *V,constDataLayout &DL,
577AssumptionCache *AC,DominatorTree *DT) {
578// Limit recursion depth to limit compile time in crazy cases.
579unsigned MaxLookup =MaxLookupSearchDepth;
580 SearchTimes++;
581constInstruction *CxtI = dyn_cast<Instruction>(V);
582
583unsigned IndexSize =DL.getIndexTypeSizeInBits(V->getType());
584 DecomposedGEP Decomposed;
585 Decomposed.Offset =APInt(IndexSize, 0);
586do {
587// See if this is a bitcast or GEP.
588constOperator *Op = dyn_cast<Operator>(V);
589if (!Op) {
590// The only non-operator case we can handle are GlobalAliases.
591if (constGlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
592if (!GA->isInterposable()) {
593 V = GA->getAliasee();
594continue;
595 }
596 }
597 Decomposed.Base =V;
598return Decomposed;
599 }
600
601if (Op->getOpcode() == Instruction::BitCast ||
602Op->getOpcode() == Instruction::AddrSpaceCast) {
603Value *NewV =Op->getOperand(0);
604// Don't look through casts between address spaces with differing index
605// widths.
606if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
607 Decomposed.Base =V;
608return Decomposed;
609 }
610V = NewV;
611continue;
612 }
613
614constGEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
615if (!GEPOp) {
616if (constauto *PHI = dyn_cast<PHINode>(V)) {
617// Look through single-arg phi nodes created by LCSSA.
618if (PHI->getNumIncomingValues() == 1) {
619V =PHI->getIncomingValue(0);
620continue;
621 }
622 }elseif (constauto *Call = dyn_cast<CallBase>(V)) {
623// CaptureTracking can know about special capturing properties of some
624// intrinsics like launder.invariant.group, that can't be expressed with
625// the attributes, but have properties like returning aliasing pointer.
626// Because some analysis may assume that nocaptured pointer is not
627// returned from some special intrinsic (because function would have to
628// be marked with returns attribute), it is crucial to use this function
629// because it should be in sync with CaptureTracking. Not using it may
630// cause weird miscompilations where 2 aliasing pointers are assumed to
631// noalias.
632if (auto *RP =getArgumentAliasingToReturnedPointer(Call,false)) {
633V =RP;
634continue;
635 }
636 }
637
638 Decomposed.Base =V;
639return Decomposed;
640 }
641
642// Track the common nowrap flags for all GEPs we see.
643 Decomposed.NWFlags &= GEPOp->getNoWrapFlags();
644
645assert(GEPOp->getSourceElementType()->isSized() &&"GEP must be sized");
646
647// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
648gep_type_iterator GTI =gep_type_begin(GEPOp);
649for (User::const_op_iteratorI = GEPOp->op_begin() + 1, E = GEPOp->op_end();
650I != E; ++I, ++GTI) {
651constValue *Index = *I;
652// Compute the (potentially symbolic) offset in bytes for this index.
653if (StructType *STy = GTI.getStructTypeOrNull()) {
654// For a struct, add the member offset.
655unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
656if (FieldNo == 0)
657continue;
658
659 Decomposed.Offset +=DL.getStructLayout(STy)->getElementOffset(FieldNo);
660continue;
661 }
662
663// For an array/pointer, add the element offset, explicitly scaled.
664if (constConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
665if (CIdx->isZero())
666continue;
667
668// Don't attempt to analyze GEPs if the scalable index is not zero.
669TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
670if (AllocTypeSize.isScalable()) {
671 Decomposed.Base =V;
672return Decomposed;
673 }
674
675 Decomposed.Offset += AllocTypeSize.getFixedValue() *
676 CIdx->getValue().sextOrTrunc(IndexSize);
677continue;
678 }
679
680TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
681if (AllocTypeSize.isScalable()) {
682 Decomposed.Base =V;
683return Decomposed;
684 }
685
686// If the integer type is smaller than the index size, it is implicitly
687// sign extended or truncated to index size.
688bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
689bool NUW = GEPOp->hasNoUnsignedWrap();
690bool NonNeg = NUSW && NUW;
691unsigned Width =Index->getType()->getIntegerBitWidth();
692unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
693unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
694 LinearExpressionLE =GetLinearExpression(
695 CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);
696
697// Scale by the type size.
698unsignedTypeSize = AllocTypeSize.getFixedValue();
699LE =LE.mul(APInt(IndexSize,TypeSize), NUW, NUSW);
700 Decomposed.Offset +=LE.Offset;
701APInt Scale =LE.Scale;
702if (!LE.IsNUW)
703 Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();
704
705// If we already had an occurrence of this index variable, merge this
706// scale into it. For example, we want to handle:
707// A[x][x] -> x*16 + x*4 -> x*20
708// This also ensures that 'x' only appears in the index list once.
709for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
710if ((Decomposed.VarIndices[i].Val.V ==LE.Val.V ||
711areBothVScale(Decomposed.VarIndices[i].Val.V,LE.Val.V)) &&
712 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
713 Scale += Decomposed.VarIndices[i].Scale;
714// We cannot guarantee no-wrap for the merge.
715LE.IsNSW =LE.IsNUW =false;
716 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
717break;
718 }
719 }
720
721if (!!Scale) {
722 VariableGEPIndexEntry = {LE.Val, Scale, CxtI,LE.IsNSW,
723/* IsNegated */false};
724 Decomposed.VarIndices.push_back(Entry);
725 }
726 }
727
728// Analyze the base pointer next.
729V = GEPOp->getOperand(0);
730 }while (--MaxLookup);
731
732// If the chain of expressions is too deep, just return early.
733 Decomposed.Base =V;
734 SearchLimitReached++;
735return Decomposed;
736}
737
738ModRefInfoBasicAAResult::getModRefInfoMask(constMemoryLocation &Loc,
739AAQueryInfo &AAQI,
740bool IgnoreLocals) {
741assert(Visited.empty() &&"Visited must be cleared after use!");
742auto_ =make_scope_exit([&] { Visited.clear(); });
743
744unsigned MaxLookup = 8;
745SmallVector<const Value *, 16> Worklist;
746 Worklist.push_back(Loc.Ptr);
747ModRefInfo Result =ModRefInfo::NoModRef;
748
749do {
750constValue *V =getUnderlyingObject(Worklist.pop_back_val());
751if (!Visited.insert(V).second)
752continue;
753
754// Ignore allocas if we were instructed to do so.
755if (IgnoreLocals && isa<AllocaInst>(V))
756continue;
757
758// If the location points to memory that is known to be invariant for
759// the life of the underlying SSA value, then we can exclude Mod from
760// the set of valid memory effects.
761//
762// An argument that is marked readonly and noalias is known to be
763// invariant while that function is executing.
764if (constArgument *Arg = dyn_cast<Argument>(V)) {
765if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
766 Result |=ModRefInfo::Ref;
767continue;
768 }
769 }
770
771// A global constant can't be mutated.
772if (constGlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
773// Note: this doesn't require GV to be "ODR" because it isn't legal for a
774// global to be marked constant in some modules and non-constant in
775// others. GV may even be a declaration, not a definition.
776if (!GV->isConstant())
777returnModRefInfo::ModRef;
778continue;
779 }
780
781// If both select values point to local memory, then so does the select.
782if (constSelectInst *SI = dyn_cast<SelectInst>(V)) {
783 Worklist.push_back(SI->getTrueValue());
784 Worklist.push_back(SI->getFalseValue());
785continue;
786 }
787
788// If all values incoming to a phi node point to local memory, then so does
789// the phi.
790if (constPHINode *PN = dyn_cast<PHINode>(V)) {
791// Don't bother inspecting phi nodes with many operands.
792if (PN->getNumIncomingValues() > MaxLookup)
793returnModRefInfo::ModRef;
794append_range(Worklist, PN->incoming_values());
795continue;
796 }
797
798// Otherwise be conservative.
799returnModRefInfo::ModRef;
800 }while (!Worklist.empty() && --MaxLookup);
801
802// If we hit the maximum number of instructions to examine, be conservative.
803if (!Worklist.empty())
804returnModRefInfo::ModRef;
805
806return Result;
807}
808
809staticboolisIntrinsicCall(constCallBase *Call,Intrinsic::ID IID) {
810constIntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
811returnII &&II->getIntrinsicID() == IID;
812}
813
814/// Returns the behavior when calling the given call site.
815MemoryEffectsBasicAAResult::getMemoryEffects(constCallBase *Call,
816AAQueryInfo &AAQI) {
817MemoryEffects Min = Call->getAttributes().getMemoryEffects();
818
819if (constFunction *F = dyn_cast<Function>(Call->getCalledOperand())) {
820MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
821// Operand bundles on the call may also read or write memory, in addition
822// to the behavior of the called function.
823if (Call->hasReadingOperandBundles())
824 FuncME |=MemoryEffects::readOnly();
825if (Call->hasClobberingOperandBundles())
826 FuncME |=MemoryEffects::writeOnly();
827 Min &= FuncME;
828 }
829
830return Min;
831}
832
833/// Returns the behavior when calling the given function. For use when the call
834/// site is not known.
835MemoryEffectsBasicAAResult::getMemoryEffects(constFunction *F) {
836switch (F->getIntrinsicID()) {
837case Intrinsic::experimental_guard:
838case Intrinsic::experimental_deoptimize:
839// These intrinsics can read arbitrary memory, and additionally modref
840// inaccessible memory to model control dependence.
841returnMemoryEffects::readOnly() |
842MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
843 }
844
845returnF->getMemoryEffects();
846}
847
848ModRefInfoBasicAAResult::getArgModRefInfo(constCallBase *Call,
849unsigned ArgIdx) {
850if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
851returnModRefInfo::Mod;
852
853if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
854returnModRefInfo::Ref;
855
856if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
857returnModRefInfo::NoModRef;
858
859returnModRefInfo::ModRef;
860}
861
862#ifndef NDEBUG
863staticconstFunction *getParent(constValue *V) {
864if (constInstruction *inst = dyn_cast<Instruction>(V)) {
865if (!inst->getParent())
866returnnullptr;
867return inst->getParent()->getParent();
868 }
869
870if (constArgument *arg = dyn_cast<Argument>(V))
871return arg->getParent();
872
873returnnullptr;
874}
875
876staticboolnotDifferentParent(constValue *O1,constValue *O2) {
877
878constFunction *F1 =getParent(O1);
879constFunction *F2 =getParent(O2);
880
881return !F1 || !F2 || F1 == F2;
882}
883#endif
884
885AliasResultBasicAAResult::alias(constMemoryLocation &LocA,
886constMemoryLocation &LocB,AAQueryInfo &AAQI,
887constInstruction *CtxI) {
888assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
889"BasicAliasAnalysis doesn't support interprocedural queries.");
890return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
891}
892
/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  //
  // We model calls that can return twice (setjmp) as clobbering non-escaping
  // objects, to model any accesses that may occur prior to the second return.
  // As an exception, ignore allocas, as setjmp is not required to preserve
  // non-volatile stores for them.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CA->isNotCapturedBefore(Object, Call, /*OrAt*/ false) &&
      (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    // Walk the data operands (arguments plus bundle operands) looking for a
    // pointer that may alias Object.
    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case if there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}
1027
1028ModRefInfoBasicAAResult::getModRefInfo(constCallBase *Call1,
1029constCallBase *Call2,
1030AAQueryInfo &AAQI) {
1031// Guard intrinsics are marked as arbitrarily writing so that proper control
1032// dependencies are maintained but they never mods any particular memory
1033// location.
1034//
1035// *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1036// heap state at the point the guard is issued needs to be consistent in case
1037// the guard invokes the "deopt" continuation.
1038
1039// NB! This function is *not* commutative, so we special case two
1040// possibilities for guard intrinsics.
1041
1042if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1043returnisModSet(getMemoryEffects(Call2, AAQI).getModRef())
1044 ?ModRefInfo::Ref
1045 :ModRefInfo::NoModRef;
1046
1047if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1048returnisModSet(getMemoryEffects(Call1, AAQI).getModRef())
1049 ?ModRefInfo::Mod
1050 :ModRefInfo::NoModRef;
1051
1052// Be conservative.
1053returnModRefInfo::ModRef;
1054}
1055
/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  // Fallback that reduces the query to the underlying objects; this can only
  // distinguish NoAlias from MayAlias (never Must/Partial).
  auto BaseObjectsAlias = [&]() {
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  };

  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    return BaseObjectsAlias();
  }

  DominatorTree *DT = getDT(AAQI);
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Fall back to base objects if pointers have different index widths.
  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
    return BaseObjectsAlias();

  // Swap GEP1 and GEP2 if GEP2 has more variable indices.
  if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
    std::swap(DecompGEP1, DecompGEP2);
    std::swap(V1Size, V2Size);
    std::swap(UnderlyingV1, UnderlyingV2);
  }

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully support scalable location
  // size

  if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  // Symmetric case to above.
  if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
      V1Size.hasValue() && !V1Size.isScalable() &&
      DecompGEP1.Offset.sle(-V1Size.getValue()) &&
      isBaseOfObject(DecompGEP1.Base))
    return AliasResult::NoAlias;

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //    +                +
      //    | BaseOffset     |
      //    ---------------->|
      //    |-->V1Size       |-------> V2Size
      //    GEP1             V2
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const TypeSize LSize = VLeftSize.getValue();
    if (!LSize.isScalable()) {
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && !VRightSize.isScalable() &&
            Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    } else {
      // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
      ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
      bool Overflow;
      APInt UpperRange = CR.getUnsignedMax().umul_ov(
          APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
      if (!Overflow && Off.uge(UpperRange))
        return AliasResult::NoAlias;
    }
  }

  // VScale Alias Analysis - Given one scalable offset between accesses and a
  // scalable typesize, we can divide each side by vscale, treating both values
  // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
  if (DecompGEP1.VarIndices.size() == 1 &&
      DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
      DecompGEP1.Offset.isZero() &&
      PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
                          PatternMatch::m_VScale())) {
    const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
    APInt Scale =
        ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
    LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;

    // Check if the offset is known to not overflow, if it does then attempt to
    // prove it with the known values of vscale_range.
    bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
    if (Overflows) {
      ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
      (void)CR.getSignedMax().smul_ov(Scale, Overflows);
    }

    if (!Overflows) {
      // Note that we do not check that the typesize is scalable, as vscale >= 1
      // so noalias still holds so long as the dependency distance is at least
      // as big as the typesize.
      if (VLeftSize.hasValue() &&
          Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
        return AliasResult::NoAlias;
    }
  }

  // If the difference between pointers is Offset +<nuw> Indices then we know
  // that the addition does not wrap the pointer index type (add nuw) and the
  // constant Offset is a lower bound on the distance between the pointers. We
  // can then prove NoAlias via Offset u>= VLeftSize.
  //    +                +                     +
  //    | BaseOffset     | +<nuw> Indices      |
  //    ---------------->|-------------------->|
  //    |-->V2Size       |                     |-------> V1Size
  //   LHS                                    RHS
  if (!DecompGEP1.VarIndices.empty() &&
      DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
      !V2Size.isScalable() && DecompGEP1.Offset.uge(V2Size.getValue()))
    return AliasResult::NoAlias;

  // Bail on analysing scalable LocationSize.
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  // Accumulate a GCD of the scales and a signed range for the total offset of
  // the variable-index part.
  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    // Without NSW, only the low zero bits of the scale are guaranteed to
    // survive a wrapping multiply, so use 2^countr_zero(Scale) for the GCD.
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}
1375
1376staticAliasResultMergeAliasResults(AliasResultA,AliasResultB) {
1377// If the results agree, take it.
1378if (A ==B)
1379returnA;
1380// A mix of PartialAlias and MustAlias is PartialAlias.
1381if ((A ==AliasResult::PartialAlias &&B ==AliasResult::MustAlias) ||
1382 (B ==AliasResult::PartialAlias &&A ==AliasResult::MustAlias))
1383returnAliasResult::PartialAlias;
1384// Otherwise, we don't know anything.
1385returnAliasResult::MayAlias;
1386}
1387
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      // MayAlias on one arm dominates any merge; bail early.
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}
1422
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  // A PHI with no incoming values can only occur in dead/unreachable code;
  // its "value" cannot alias anything.
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges. Don't do this if we are analyzing across
  // iterations, as we may pick a different phi entry in different iterations.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        // MergeAliasResults can never recover from MayAlias; stop early.
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid to all elements and needs to conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}
1541
1542/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1543/// array references.
1544AliasResult BasicAAResult::aliasCheck(constValue *V1,LocationSize V1Size,
1545constValue *V2,LocationSize V2Size,
1546AAQueryInfo &AAQI,
1547constInstruction *CtxI) {
1548// If either of the memory references is empty, it doesn't matter what the
1549// pointer values are.
1550if (V1Size.isZero() || V2Size.isZero())
1551returnAliasResult::NoAlias;
1552
1553// Strip off any casts if they exist.
1554 V1 = V1->stripPointerCastsForAliasAnalysis();
1555V2 =V2->stripPointerCastsForAliasAnalysis();
1556
1557// If V1 or V2 is undef, the result is NoAlias because we can always pick a
1558// value for undef that aliases nothing in the program.
1559if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1560returnAliasResult::NoAlias;
1561
1562// Are we checking for alias of the same value?
1563// Because we look 'through' phi nodes, we could look at "Value" pointers from
1564// different iterations. We must therefore make sure that this is not the
1565// case. The function isValueEqualInPotentialCycles ensures that this cannot
1566// happen by looking at the visited phi nodes and making sure they cannot
1567// reach the value.
1568if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1569returnAliasResult::MustAlias;
1570
1571if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1572returnAliasResult::NoAlias;// Scalars cannot alias each other
1573
1574// Figure out what objects these things are pointing to if we can.
1575constValue *O1 =getUnderlyingObject(V1,MaxLookupSearchDepth);
1576constValue *O2 =getUnderlyingObject(V2,MaxLookupSearchDepth);
1577
1578// Null values in the default address space don't point to any object, so they
1579// don't alias any other pointer.
1580if (constConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1581if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1582returnAliasResult::NoAlias;
1583if (constConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1584if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1585returnAliasResult::NoAlias;
1586
1587if (O1 != O2) {
1588// If V1/V2 point to two different objects, we know that we have no alias.
1589if (isIdentifiedObject(O1) &&isIdentifiedObject(O2))
1590returnAliasResult::NoAlias;
1591
1592// Function arguments can't alias with things that are known to be
1593// unambigously identified at the function level.
1594if ((isa<Argument>(O1) &&isIdentifiedFunctionLocal(O2)) ||
1595 (isa<Argument>(O2) &&isIdentifiedFunctionLocal(O1)))
1596returnAliasResult::NoAlias;
1597
1598// If one pointer is the result of a call/invoke or load and the other is a
1599// non-escaping local object within the same function, then we know the
1600// object couldn't escape to a point where the call could return it.
1601//
1602// Note that if the pointers are in different functions, there are a
1603// variety of complications. A call with a nocapture argument may still
1604// temporary store the nocapture argument's value in a temporary memory
1605// location if that memory location doesn't escape. Or it may pass a
1606// nocapture value to other functions as long as they don't capture it.
1607if (isEscapeSource(O1) && AAQI.CA->isNotCapturedBefore(
1608 O2, dyn_cast<Instruction>(O1),/*OrAt*/true))
1609returnAliasResult::NoAlias;
1610if (isEscapeSource(O2) && AAQI.CA->isNotCapturedBefore(
1611 O1, dyn_cast<Instruction>(O2),/*OrAt*/true))
1612returnAliasResult::NoAlias;
1613 }
1614
1615// If the size of one access is larger than the entire object on the other
1616// side, then we know such behavior is undefined and can assume no alias.
1617bool NullIsValidLocation =NullPointerIsDefined(&F);
1618if ((isObjectSmallerThan(
1619 O2,getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1620 TLI, NullIsValidLocation)) ||
1621 (isObjectSmallerThan(
1622 O1,getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1623 TLI, NullIsValidLocation)))
1624returnAliasResult::NoAlias;
1625
1626if (EnableSeparateStorageAnalysis) {
1627for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1628if (!Elem || Elem.Index ==AssumptionCache::ExprResultIdx)
1629continue;
1630
1631AssumeInst *Assume = cast<AssumeInst>(Elem);
1632OperandBundleUse OBU =Assume->getOperandBundleAt(Elem.Index);
1633if (OBU.getTagName() =="separate_storage") {
1634assert(OBU.Inputs.size() == 2);
1635constValue *Hint1 = OBU.Inputs[0].get();
1636constValue *Hint2 = OBU.Inputs[1].get();
1637// This is often a no-op; instcombine rewrites this for us. No-op
1638// getUnderlyingObject calls are fast, though.
1639constValue *HintO1 =getUnderlyingObject(Hint1);
1640constValue *HintO2 =getUnderlyingObject(Hint2);
1641
1642DominatorTree *DT = getDT(AAQI);
1643auto ValidAssumeForPtrContext = [&](constValue *Ptr) {
1644if (constInstruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1645returnisValidAssumeForContext(Assume, PtrI, DT,
1646/* AllowEphemerals */true);
1647 }
1648if (constArgument *PtrA = dyn_cast<Argument>(Ptr)) {
1649constInstruction *FirstI =
1650 &*PtrA->getParent()->getEntryBlock().begin();
1651returnisValidAssumeForContext(Assume, FirstI, DT,
1652/* AllowEphemerals */true);
1653 }
1654returnfalse;
1655 };
1656
1657if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1658// Note that we go back to V1 and V2 for the
1659// ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1660// so strictly more assumptions are valid for them.
1661if ((CtxI &&isValidAssumeForContext(Assume, CtxI, DT,
1662/* AllowEphemerals */true)) ||
1663 ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1664returnAliasResult::NoAlias;
1665 }
1666 }
1667 }
1668 }
1669 }
1670
1671// If one the accesses may be before the accessed pointer, canonicalize this
1672// by using unknown after-pointer sizes for both accesses. This is
1673// equivalent, because regardless of which pointer is lower, one of them
1674// will always came after the other, as long as the underlying objects aren't
1675// disjoint. We do this so that the rest of BasicAA does not have to deal
1676// with accesses before the base pointer, and to improve cache utilization by
1677// merging equivalent states.
1678if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1679 V1Size =LocationSize::afterPointer();
1680 V2Size =LocationSize::afterPointer();
1681 }
1682
1683// FIXME: If this depth limit is hit, then we may cache sub-optimal results
1684// for recursive queries. For this reason, this limit is chosen to be large
1685// enough to be very rarely hit, while still being small enough to avoid
1686// stack overflows.
1687if (AAQI.Depth >= 512)
1688returnAliasResult::MayAlias;
1689
1690// Check the cache before climbing up use-def chains. This also terminates
1691// otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1692// cache key, because some cases where MayBeCrossIteration==false returns
1693// MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1694AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1695 {V2, V2Size, AAQI.MayBeCrossIteration});
1696constbool Swapped = V1 >V2;
1697if (Swapped)
1698std::swap(Locs.first, Locs.second);
1699constauto &Pair = AAQI.AliasCache.try_emplace(
1700 Locs,AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1701if (!Pair.second) {
1702auto &Entry = Pair.first->second;
1703if (!Entry.isDefinitive()) {
1704// Remember that we used an assumption. This may either be a direct use
1705// of an assumption, or a use of an entry that may itself be based on an
1706// assumption.
1707 ++AAQI.NumAssumptionUses;
1708if (Entry.isAssumption())
1709 ++Entry.NumAssumptionUses;
1710 }
1711// Cache contains sorted {V1,V2} pairs but we should return original order.
1712autoResult =Entry.Result;
1713Result.swap(Swapped);
1714returnResult;
1715 }
1716
1717int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1718unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1719AliasResultResult =
1720 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1721
1722auto It = AAQI.AliasCache.find(Locs);
1723assert(It != AAQI.AliasCache.end() &&"Must be in cache");
1724auto &Entry = It->second;
1725
1726// Check whether a NoAlias assumption has been used, but disproven.
1727bool AssumptionDisproven =
1728Entry.NumAssumptionUses > 0 &&Result !=AliasResult::NoAlias;
1729if (AssumptionDisproven)
1730Result =AliasResult::MayAlias;
1731
1732// This is a definitive result now, when considered as a root query.
1733 AAQI.NumAssumptionUses -=Entry.NumAssumptionUses;
1734Entry.Result =Result;
1735// Cache contains sorted {V1,V2} pairs.
1736Entry.Result.swap(Swapped);
1737
1738// If the assumption has been disproven, remove any results that may have
1739// been based on this assumption. Do this after the Entry updates above to
1740// avoid iterator invalidation.
1741if (AssumptionDisproven)
1742while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1743 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1744
1745// The result may still be based on assumptions higher up in the chain.
1746// Remember it, so it can be purged from the cache later.
1747if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1748 Result !=AliasResult::MayAlias) {
1749 AAQI.AssumptionBasedResults.push_back(Locs);
1750Entry.NumAssumptionUses =AAQueryInfo::CacheEntry::AssumptionBased;
1751 }else {
1752Entry.NumAssumptionUses =AAQueryInfo::CacheEntry::Definitive;
1753 }
1754
1755// Depth is incremented before this function is called, so Depth==1 indicates
1756// a root query.
1757if (AAQI.Depth == 1) {
1758// Any remaining assumption based results must be based on proven
1759// assumptions, so convert them to definitive results.
1760for (constauto &Loc : AAQI.AssumptionBasedResults) {
1761auto It = AAQI.AliasCache.find(Loc);
1762if (It != AAQI.AliasCache.end())
1763 It->second.NumAssumptionUses =AAQueryInfo::CacheEntry::Definitive;
1764 }
1765 AAQI.AssumptionBasedResults.clear();
1766 AAQI.NumAssumptionUses = 0;
1767 }
1768returnResult;
1769}
1770
1771AliasResult BasicAAResult::aliasCheckRecursive(
1772constValue *V1,LocationSize V1Size,
1773constValue *V2,LocationSize V2Size,
1774AAQueryInfo &AAQI,constValue *O1,constValue *O2) {
1775if (constGEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1776AliasResultResult = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1777if (Result !=AliasResult::MayAlias)
1778returnResult;
1779 }elseif (constGEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1780AliasResultResult = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1781Result.swap();
1782if (Result !=AliasResult::MayAlias)
1783returnResult;
1784 }
1785
1786if (constPHINode *PN = dyn_cast<PHINode>(V1)) {
1787AliasResultResult = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1788if (Result !=AliasResult::MayAlias)
1789returnResult;
1790 }elseif (constPHINode *PN = dyn_cast<PHINode>(V2)) {
1791AliasResultResult = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1792Result.swap();
1793if (Result !=AliasResult::MayAlias)
1794returnResult;
1795 }
1796
1797if (constSelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1798AliasResultResult = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1799if (Result !=AliasResult::MayAlias)
1800returnResult;
1801 }elseif (constSelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1802AliasResultResult = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1803Result.swap();
1804if (Result !=AliasResult::MayAlias)
1805returnResult;
1806 }
1807
1808// If both pointers are pointing into the same object and one of them
1809// accesses the entire object, then the accesses must overlap in some way.
1810if (O1 == O2) {
1811bool NullIsValidLocation =NullPointerIsDefined(&F);
1812if (V1Size.isPrecise() && V2Size.isPrecise() &&
1813 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1814isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1815returnAliasResult::PartialAlias;
1816 }
1817
1818returnAliasResult::MayAlias;
1819}
1820
1821/// Check whether two Values can be considered equivalent.
1822///
1823/// If the values may come from different cycle iterations, this will also
1824/// check that the values are not part of cycle. We have to do this because we
1825/// are looking through phi nodes, that is we say
1826/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
1827bool BasicAAResult::isValueEqualInPotentialCycles(constValue *V,
1828constValue *V2,
1829constAAQueryInfo &AAQI) {
1830if (V != V2)
1831returnfalse;
1832
1833if (!AAQI.MayBeCrossIteration)
1834returntrue;
1835
1836// Non-instructions and instructions in the entry block cannot be part of
1837// a loop.
1838constInstruction *Inst = dyn_cast<Instruction>(V);
1839if (!Inst || Inst->getParent()->isEntryBlock())
1840returntrue;
1841
1842returnisNotInCycle(Inst, getDT(AAQI),/*LI*/nullptr);
1843}
1844
1845/// Computes the symbolic difference between two de-composed GEPs.
1846void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1847const DecomposedGEP &SrcGEP,
1848constAAQueryInfo &AAQI) {
1849// Drop nuw flag from GEP if subtraction of constant offsets overflows in an
1850// unsigned sense.
1851if (DestGEP.Offset.ult(SrcGEP.Offset))
1852 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1853
1854 DestGEP.Offset -= SrcGEP.Offset;
1855for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1856// Find V in Dest. This is N^2, but pointer indices almost never have more
1857// than a few variable indexes.
1858bool Found =false;
1859for (autoI :enumerate(DestGEP.VarIndices)) {
1860 VariableGEPIndex &Dest =I.value();
1861if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
1862 !areBothVScale(Dest.Val.V, Src.Val.V)) ||
1863 !Dest.Val.hasSameCastsAs(Src.Val))
1864continue;
1865
1866// Normalize IsNegated if we're going to lose the NSW flag anyway.
1867if (Dest.IsNegated) {
1868 Dest.Scale = -Dest.Scale;
1869 Dest.IsNegated =false;
1870 Dest.IsNSW =false;
1871 }
1872
1873// If we found it, subtract off Scale V's from the entry in Dest. If it
1874// goes to zero, remove the entry.
1875if (Dest.Scale != Src.Scale) {
1876// Drop nuw flag from GEP if subtraction of V's Scale overflows in an
1877// unsigned sense.
1878if (Dest.Scale.ult(Src.Scale))
1879 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1880
1881 Dest.Scale -= Src.Scale;
1882 Dest.IsNSW =false;
1883 }else {
1884 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() +I.index());
1885 }
1886 Found =true;
1887break;
1888 }
1889
1890// If we didn't consume this entry, add it to the end of the Dest list.
1891if (!Found) {
1892 VariableGEPIndexEntry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1893/* IsNegated */true};
1894 DestGEP.VarIndices.push_back(Entry);
1895
1896// Drop nuw flag when we have unconsumed variable indices from SrcGEP.
1897 DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
1898 }
1899 }
1900}
1901
1902bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1903LocationSize MaybeV1Size,
1904LocationSize MaybeV2Size,
1905AssumptionCache *AC,
1906DominatorTree *DT,
1907constAAQueryInfo &AAQI) {
1908if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1909 !MaybeV2Size.hasValue())
1910returnfalse;
1911
1912constuint64_t V1Size = MaybeV1Size.getValue();
1913constuint64_t V2Size = MaybeV2Size.getValue();
1914
1915const VariableGEPIndex &Var0 =GEP.VarIndices[0], &Var1 =GEP.VarIndices[1];
1916
1917if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1918 !Var0.hasNegatedScaleOf(Var1) ||
1919 Var0.Val.V->getType() != Var1.Val.V->getType())
1920returnfalse;
1921
1922// We'll strip off the Extensions of Var0 and Var1 and do another round
1923// of GetLinearExpression decomposition. In the example above, if Var0
1924// is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
1925
1926 LinearExpression E0 =
1927GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1928 LinearExpression E1 =
1929GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1930if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1931 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1932returnfalse;
1933
1934// We have a hit - Var0 and Var1 only differ by a constant offset!
1935
1936// If we've been sext'ed then zext'd the maximum difference between Var0 and
1937// Var1 is possible to calculate, but we're just interested in the absolute
1938// minimum difference between the two. The minimum distance may occur due to
1939// wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1940// the minimum distance between %i and %i + 5 is 3.
1941APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1942 MinDiff =APIntOps::umin(MinDiff, Wrapped);
1943APInt MinDiffBytes =
1944 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1945
1946// We can't definitely say whether GEP1 is before or after V2 due to wrapping
1947// arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
1948// values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1949// V2Size can fit in the MinDiffBytes gap.
1950return MinDiffBytes.uge(V1Size +GEP.Offset.abs()) &&
1951 MinDiffBytes.uge(V2Size +GEP.Offset.abs());
1952}
1953
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

// Unique key used by the new pass manager to identify the BasicAA analysis.
AnalysisKey BasicAA::Key;
1959
1960BasicAAResultBasicAA::run(Function &F,FunctionAnalysisManager &AM) {
1961auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1962auto &AC = AM.getResult<AssumptionAnalysis>(F);
1963auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1964returnBasicAAResult(F.getDataLayout(),F, TLI, AC, DT);
1965}
1966
// Legacy-PM wrapper constructor: registers the pass with the global registry.
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}
1970
// Unique pass identifier; its address is used by the legacy pass manager.
char BasicAAWrapperPass::ID = 0;

// Out-of-line virtual method anchor to pin the class's vtable to this file.
void BasicAAWrapperPass::anchor() {}
1974
1975INITIALIZE_PASS_BEGIN(BasicAAWrapperPass,"basic-aa",
1976"Basic Alias Analysis (stateless AA impl)",true,true)
1977INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1978INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1979INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1980INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1981 "Basic AliasAnalysis (stateless AAimpl)",true,true)
1982
1983FunctionPass *llvm::createBasicAAWrapperPass() {
1984returnnewBasicAAWrapperPass();
1985}
1986
1987boolBasicAAWrapperPass::runOnFunction(Function &F) {
1988auto &ACT = getAnalysis<AssumptionCacheTracker>();
1989auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1990auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1991
1992 Result.reset(newBasicAAResult(F.getDataLayout(),F,
1993 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1994 &DTWP.getDomTree()));
1995
1996returnfalse;
1997}
1998
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  // BasicAA is a pure analysis and invalidates nothing.
  AU.setPreservesAll();
  // Transitive requirements: these analyses are queried lazily while clients
  // use the BasicAAResult, so they must stay alive as long as this pass does.
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
S1
static const LLT S1
Definition:AMDGPULegalizerInfo.cpp:282
PHI
Rewrite undef for PHI
Definition:AMDGPURewriteUndefForPHI.cpp:100
APInt.h
This file implements a class to represent arbitrary precision integral constant values and operations...
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition:ARMSLSHardening.cpp:73
aa
aa
Definition:AliasAnalysis.cpp:730
AliasAnalysis.h
CFG.h
print
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Definition:ArchiveWriter.cpp:205
AssumptionCache.h
Attributes.h
This file contains the simple types necessary to represent the attributes associated with functions a...
EnableRecPhiAnalysis
static cl::opt< bool > EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, cl::init(true))
Enable analysis of recursive PHI nodes.
getParent
static const Function * getParent(const Value *V)
Definition:BasicAliasAnalysis.cpp:863
isObjectSmallerThan
static bool isObjectSmallerThan(const Value *V, TypeSize Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V is smaller than Size.
Definition:BasicAliasAnalysis.cpp:122
isObjectSize
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL, const TargetLibraryInfo &TLI, bool NullIsValidLoc)
Returns true if we can prove that the object specified by V has size Size.
Definition:BasicAliasAnalysis.cpp:180
EnableSeparateStorageAnalysis
static cl::opt< bool > EnableSeparateStorageAnalysis("basic-aa-separate-storage", cl::Hidden, cl::init(true))
notDifferentParent
static bool notDifferentParent(const Value *O1, const Value *O2)
Definition:BasicAliasAnalysis.cpp:876
GetLinearExpression
static LinearExpression GetLinearExpression(const CastedValue &Val, const DataLayout &DL, unsigned Depth, AssumptionCache *AC, DominatorTree *DT)
Analyzes the specified value as a linear expression: "A*V + B", where A and B are constant integers.
Definition:BasicAliasAnalysis.cpp:401
isNotInCycle
static bool isNotInCycle(const Instruction *I, const DominatorTree *DT, const LoopInfo *LI)
Definition:BasicAliasAnalysis.cpp:205
areBothVScale
static bool areBothVScale(const Value *V1, const Value *V2)
Return true if both V1 and V2 are VScale.
Definition:BasicAliasAnalysis.cpp:188
getMinimalExtentFrom
static TypeSize getMinimalExtentFrom(const Value &V, const LocationSize &LocSize, const DataLayout &DL, bool NullIsValidLoc)
Return the minimal extent from V to the end of the underlying object, assuming the result is used in ...
Definition:BasicAliasAnalysis.cpp:160
MergeAliasResults
static AliasResult MergeAliasResults(AliasResult A, AliasResult B)
Definition:BasicAliasAnalysis.cpp:1376
isIntrinsicCall
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID)
Definition:BasicAliasAnalysis.cpp:809
true
basic Basic Alias true
Definition:BasicAliasAnalysis.cpp:1981
MaxLookupSearchDepth
static const unsigned MaxLookupSearchDepth
Definition:BasicAliasAnalysis.cpp:84
BasicAliasAnalysis.h
This is the interface for LLVM's primary stateless and local alias analysis.
Analysis
block Block Frequency Analysis
Definition:BlockFrequencyInfo.cpp:300
B
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
A
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
CaptureTracking.h
Casting.h
CommandLine.h
Compiler.h
ConstantRange.h
Constants.h
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DataLayout.h
DerivedTypes.h
Dominators.h
Size
uint64_t Size
Definition:ELFObjHandler.cpp:81
Other
std::optional< std::vector< StOtherPiece > > Other
Definition:ELFYAML.cpp:1315
GetElementPtrTypeIterator.h
GlobalAlias.h
GlobalVariable.h
GEP
Hexagon Common GEP
Definition:HexagonCommonGEP.cpp:170
_
#define _
Definition:HexagonMCCodeEmitter.cpp:46
Argument.h
Constant.h
Function.h
Instruction.h
IntrinsicInst.h
Operator.h
Type.h
User.h
Value.h
InitializePasses.h
InstrTypes.h
Instructions.h
Intrinsics.h
KnownBits.h
F
#define F(x, y, z)
Definition:MD5.cpp:55
I
#define I(x, y, z)
Definition:MD5.cpp:58
MemoryBuiltins.h
MemoryLocation.h
This file provides utility analysis objects describing memory locations.
II
uint64_t IntrinsicInst * II
Definition:NVVMIntrRange.cpp:51
INITIALIZE_PASS_DEPENDENCY
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition:PassSupport.h:55
INITIALIZE_PASS_END
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition:PassSupport.h:57
INITIALIZE_PASS_BEGIN
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition:PassSupport.h:52
Pass.h
PatternMatch.h
impl
place backedge safepoints impl
Definition:PlaceSafepoints.cpp:173
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
OS
raw_pwrite_stream & OS
Definition:SampleProfWriter.cpp:51
SaveAndRestore.h
This file provides utility classes that use RAII to save and restore values.
ScopeExit.h
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
SmallPtrSet.h
This file defines the SmallPtrSet class.
SmallVector.h
This file defines the SmallVector class.
Statistic.h
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
STATISTIC
#define STATISTIC(VARNAME, DESC)
Definition:Statistic.h:166
Ptr
@ Ptr
Definition:TargetLibraryInfo.cpp:77
TargetLibraryInfo.h
getBitWidth
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
Definition:ValueTracking.cpp:93
ValueTracking.h
RHS
Value * RHS
Definition:X86PartialReduction.cpp:74
llvm::AAQueryInfo
This class stores info we want to provide to or retain within an alias query.
Definition:AliasAnalysis.h:238
llvm::AAQueryInfo::AAR
AAResults & AAR
Definition:AliasAnalysis.h:263
llvm::AAQueryInfo::AssumptionBasedResults
SmallVector< AAQueryInfo::LocPair, 4 > AssumptionBasedResults
Location pairs for which an assumption based result is currently stored.
Definition:AliasAnalysis.h:279
llvm::AAQueryInfo::Depth
unsigned Depth
Query depth used to distinguish recursive queries.
Definition:AliasAnalysis.h:271
llvm::AAQueryInfo::NumAssumptionUses
int NumAssumptionUses
How many active NoAlias assumption uses there are.
Definition:AliasAnalysis.h:274
llvm::AAQueryInfo::LocPair
std::pair< AACacheLoc, AACacheLoc > LocPair
Definition:AliasAnalysis.h:240
llvm::AAQueryInfo::AliasCache
AliasCacheT AliasCache
Definition:AliasAnalysis.h:266
llvm::AAQueryInfo::MayBeCrossIteration
bool MayBeCrossIteration
Tracks whether the accesses may be on different cycle iterations.
Definition:AliasAnalysis.h:295
llvm::AAQueryInfo::CA
CaptureAnalysis * CA
Definition:AliasAnalysis.h:268
llvm::AAResults::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
The main low level interface to the alias analysis implementation.
Definition:AliasAnalysis.cpp:105
llvm::AAResults::getMemoryEffects
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Definition:AliasAnalysis.cpp:387
llvm::APInt
Class for arbitrary precision integers.
Definition:APInt.h:78
llvm::APInt::umul_ov
APInt umul_ov(const APInt &RHS, bool &Overflow) const
Definition:APInt.cpp:1945
llvm::APInt::zext
APInt zext(unsigned width) const
Zero extend to a new width.
Definition:APInt.cpp:986
llvm::APInt::zextOrTrunc
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition:APInt.cpp:1007
llvm::APInt::getMaxValue
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition:APInt.h:206
llvm::APInt::abs
APInt abs() const
Get the absolute value.
Definition:APInt.h:1773
llvm::APInt::getBitWidth
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition:APInt.h:1468
llvm::APInt::isNegative
bool isNegative() const
Determine sign of this APInt.
Definition:APInt.h:329
llvm::APInt::countr_zero
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition:APInt.h:1618
llvm::APInt::getSignedMinValue
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition:APInt.h:219
llvm::APInt::srem
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition:APInt.cpp:1710
llvm::APInt::smul_ov
APInt smul_ov(const APInt &RHS, bool &Overflow) const
Definition:APInt.cpp:1934
llvm::APInt::isNonNegative
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition:APInt.h:334
llvm::APInt::getZero
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition:APInt.h:200
llvm::APInt::getOneBitSet
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition:APInt.h:239
llvm::APInt::uge
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition:APInt.h:1221
llvm::AliasResult
The possible results of an alias query.
Definition:AliasAnalysis.h:77
llvm::AliasResult::swap
void swap(bool DoSwap=true)
Helper for processing AliasResult for swapped memory location pairs.
Definition:AliasAnalysis.h:135
llvm::AliasResult::MayAlias
@ MayAlias
The two locations may or may not alias.
Definition:AliasAnalysis.h:98
llvm::AliasResult::NoAlias
@ NoAlias
The two locations do not alias at all.
Definition:AliasAnalysis.h:95
llvm::AliasResult::PartialAlias
@ PartialAlias
The two locations alias, but only due to a partial overlap.
Definition:AliasAnalysis.h:100
llvm::AliasResult::MustAlias
@ MustAlias
The two locations precisely alias each other.
Definition:AliasAnalysis.h:102
llvm::AliasResult::setOffset
void setOffset(int32_t NewOffset)
Definition:AliasAnalysis.h:127
llvm::AnalysisManager::Invalidator
API to communicate dependencies between analyses during invalidation.
Definition:PassManager.h:292
llvm::AnalysisManager::Invalidator::invalidate
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition:PassManager.h:310
llvm::AnalysisManager
A container for analyses that lazily runs them and caches their results.
Definition:PassManager.h:253
llvm::AnalysisManager::getResult
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition:PassManager.h:410
llvm::AnalysisUsage
Represent the analysis usage information of a pass.
Definition:PassAnalysisSupport.h:47
llvm::AnalysisUsage::setPreservesAll
void setPreservesAll()
Set by analyses that do not transform their input at all.
Definition:PassAnalysisSupport.h:130
llvm::AnalysisUsage::addRequiredTransitive
AnalysisUsage & addRequiredTransitive()
Definition:PassAnalysisSupport.h:81
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition:Argument.h:31
llvm::AssumeInst
This represents the llvm.assume intrinsic.
Definition:IntrinsicInst.h:1843
llvm::AssumptionAnalysis
A function analysis which provides an AssumptionCache.
Definition:AssumptionCache.h:173
llvm::AssumptionCacheTracker
An immutable pass that tracks lazily created AssumptionCache objects.
Definition:AssumptionCache.h:204
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition:AssumptionCache.h:42
llvm::AssumptionCache::ExprResultIdx
@ ExprResultIdx
Definition:AssumptionCache.h:46
llvm::AssumptionCache::assumptionsFor
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Definition:AssumptionCache.h:157
llvm::BasicAAResult
This is the AA result object for the basic, local, and stateless alias analysis.
Definition:BasicAliasAnalysis.h:41
llvm::BasicAAResult::getModRefInfo
ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, AAQueryInfo &AAQI)
Checks to see if the specified callsite can clobber the specified memory object.
Definition:BasicAliasAnalysis.cpp:899
llvm::BasicAAResult::getArgModRefInfo
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx)
Get the location associated with a pointer argument of a callsite.
Definition:BasicAliasAnalysis.cpp:848
llvm::BasicAAResult::getMemoryEffects
MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI)
Returns the behavior when calling the given call site.
Definition:BasicAliasAnalysis.cpp:815
llvm::BasicAAResult::getModRefInfoMask
ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI, bool IgnoreLocals=false)
Returns a bitmask that should be unconditionally applied to the ModRef info of a memory location.
Definition:BasicAliasAnalysis.cpp:738
llvm::BasicAAResult::invalidate
bool invalidate(Function &Fn, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
Definition:BasicAliasAnalysis.cpp:86
llvm::BasicAAResult::alias
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, AAQueryInfo &AAQI, const Instruction *CtxI)
Definition:BasicAliasAnalysis.cpp:885
llvm::BasicAAWrapperPass
Legacy wrapper pass to provide the BasicAAResult object.
Definition:BasicAliasAnalysis.h:164
llvm::BasicAAWrapperPass::ID
static char ID
Definition:BasicAliasAnalysis.h:170
llvm::BasicAAWrapperPass::runOnFunction
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
Definition:BasicAliasAnalysis.cpp:1987
llvm::BasicAAWrapperPass::getAnalysisUsage
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
Definition:BasicAliasAnalysis.cpp:1999
llvm::BasicAAWrapperPass::BasicAAWrapperPass
BasicAAWrapperPass()
Definition:BasicAliasAnalysis.cpp:1967
llvm::BasicAA::run
BasicAAResult run(Function &F, FunctionAnalysisManager &AM)
Definition:BasicAliasAnalysis.cpp:1960
llvm::BasicBlock
LLVM Basic Block Representation.
Definition:BasicBlock.h:61
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition:BasicBlock.h:220
llvm::BinaryOperator
Definition:InstrTypes.h:170
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition:InstrTypes.h:1112
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition:Instructions.h:1479
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition:Constants.h:83
llvm::ConstantPointerNull
A constant pointer value that points to null.
Definition:Constants.h:552
llvm::ConstantRange
This class represents a range of values.
Definition:ConstantRange.h:47
llvm::ConstantRange::add
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
Definition:ConstantRange.cpp:1067
llvm::ConstantRange::Signed
@ Signed
Definition:ConstantRange.h:327
llvm::ConstantRange::fromKnownBits
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
Definition:ConstantRange.cpp:60
llvm::ConstantRange::smul_fast
ConstantRange smul_fast(const ConstantRange &Other) const
Return range of possible values for a signed multiplication of this and Other.
Definition:ConstantRange.cpp:1262
llvm::ConstantRange::isEmptySet
bool isEmptySet() const
Return true if this set contains no members.
Definition:ConstantRange.cpp:418
llvm::ConstantRange::smul_sat
ConstantRange smul_sat(const ConstantRange &Other) const
Perform a signed saturating multiplication of two constant ranges.
Definition:ConstantRange.cpp:1894
llvm::ConstantRange::getUnsignedMax
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
Definition:ConstantRange.cpp:483
llvm::ConstantRange::intersectWith
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
Definition:ConstantRange.cpp:581
llvm::ConstantRange::getSignedMax
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
Definition:ConstantRange.cpp:495
llvm::ConstantRange::getBitWidth
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
Definition:ConstantRange.h:209
llvm::ConstantRange::sub
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
Definition:ConstantRange.cpp:1114
llvm::DWARFExpression::Operation
This class represents an Operation in the Expression.
Definition:DWARFExpression.h:32
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition:DataLayout.h:63
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition:DenseMap.h:156
llvm::DenseMapBase::try_emplace
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition:DenseMap.h:226
llvm::DenseMapBase::erase
bool erase(const KeyT &Val)
Definition:DenseMap.h:321
llvm::DenseMapBase::end
iterator end()
Definition:DenseMap.h:84
llvm::DominatorTreeAnalysis
Analysis pass which computes a DominatorTree.
Definition:Dominators.h:279
llvm::DominatorTreeBase::getRoot
NodeT * getRoot() const
Definition:GenericDomTree.h:512
llvm::DominatorTreeWrapperPass
Legacy analysis pass which computes a DominatorTree.
Definition:Dominators.h:317
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition:Dominators.h:162
llvm::EarliestEscapeAnalysis::removeInstruction
void removeInstruction(Instruction *I)
Definition:BasicAliasAnalysis.cpp:246
llvm::EarliestEscapeAnalysis::isNotCapturedBefore
bool isNotCapturedBefore(const Value *Object, const Instruction *I, bool OrAt) override
Check whether Object is not captured before instruction I.
Definition:BasicAliasAnalysis.cpp:213
llvm::FunctionPass
FunctionPass class - This class is used to implement most global optimizations.
Definition:Pass.h:310
llvm::Function
Definition:Function.h:63
llvm::GEPNoWrapFlags
Represents flags for the getelementptr instruction/expression.
Definition:GEPNoWrapFlags.h:26
llvm::GEPNoWrapFlags::all
static GEPNoWrapFlags all()
Definition:GEPNoWrapFlags.h:47
llvm::GEPNoWrapFlags::hasNoUnsignedWrap
bool hasNoUnsignedWrap() const
Definition:GEPNoWrapFlags.h:65
llvm::GEPNoWrapFlags::isInBounds
bool isInBounds() const
Definition:GEPNoWrapFlags.h:63
llvm::GEPOperator
Definition:Operator.h:425
llvm::GEPOperator::hasNoUnsignedSignedWrap
bool hasNoUnsignedSignedWrap() const
Definition:Operator.h:437
llvm::GEPOperator::getSourceElementType
Type * getSourceElementType() const
Definition:Operator.cpp:70
llvm::GEPOperator::hasNoUnsignedWrap
bool hasNoUnsignedWrap() const
Definition:Operator.h:441
llvm::GEPOperator::getNoWrapFlags
GEPNoWrapFlags getNoWrapFlags() const
Definition:Operator.h:430
llvm::GlobalAlias
Definition:GlobalAlias.h:28
llvm::GlobalValue::getParent
Module * getParent()
Get the module that this global value is contained inside of...
Definition:GlobalValue.h:657
llvm::GlobalVariable
Definition:GlobalVariable.h:39
llvm::Instruction
Definition:Instruction.h:68
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition:IntrinsicInst.h:48
llvm::LocationSize
Definition:MemoryLocation.h:68
llvm::LocationSize::hasValue
bool hasValue() const
Definition:MemoryLocation.h:165
llvm::LocationSize::mayBeBeforePointer
bool mayBeBeforePointer() const
Whether accesses before the base pointer are possible.
Definition:MemoryLocation.h:187
llvm::LocationSize::beforeOrAfterPointer
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
Definition:MemoryLocation.h:137
llvm::LocationSize::isScalable
bool isScalable() const
Definition:MemoryLocation.h:168
llvm::LocationSize::getValue
TypeSize getValue() const
Definition:MemoryLocation.h:170
llvm::LocationSize::isZero
bool isZero() const
Definition:MemoryLocation.h:182
llvm::LocationSize::isPrecise
bool isPrecise() const
Definition:MemoryLocation.h:179
llvm::LocationSize::afterPointer
static constexpr LocationSize afterPointer()
Any location after the base pointer (but still within the underlying object).
Definition:MemoryLocation.h:131
llvm::LoopInfo
Definition:LoopInfo.h:407
llvm::MemoryEffectsBase
Definition:ModRef.h:72
llvm::MemoryEffectsBase::readOnly
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition:ModRef.h:122
llvm::MemoryEffectsBase::inaccessibleMemOnly
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition:ModRef.h:138
llvm::MemoryEffectsBase::writeOnly
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition:ModRef.h:127
llvm::MemoryLocation
Representation for a specific memory location.
Definition:MemoryLocation.h:227
llvm::MemoryLocation::Size
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
Definition:MemoryLocation.h:244
llvm::MemoryLocation::getBeforeOrAfter
static MemoryLocation getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location before or after Ptr, while remaining within the underl...
Definition:MemoryLocation.h:295
llvm::MemoryLocation::Ptr
const Value * Ptr
The address of the start of the location.
Definition:MemoryLocation.h:235
llvm::Operator
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition:Operator.h:32
llvm::PHINode
Definition:Instructions.h:2600
llvm::PHINode::incoming_values
op_range incoming_values()
Definition:Instructions.h:2665
llvm::PHINode::getIncomingBlock
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Definition:Instructions.h:2695
llvm::PHINode::getIncomingValue
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
Definition:Instructions.h:2675
llvm::PHINode::getNumIncomingValues
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Definition:Instructions.h:2671
llvm::PassRegistry::getPassRegistry
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Definition:PassRegistry.cpp:24
llvm::PreservedAnalyses
A set of analyses that are preserved following a run of a transformation pass.
Definition:Analysis.h:111
llvm::SelectInst
This class represents the LLVM 'select' instruction.
Definition:Instructions.h:1657
llvm::SimpleCaptureAnalysis::isNotCapturedBefore
bool isNotCapturedBefore(const Value *Object, const Instruction *I, bool OrAt) override
Check whether Object is not captured before instruction I.
Definition:BasicAliasAnalysis.cpp:199
llvm::SmallPtrSetImplBase::size
size_type size() const
Definition:SmallPtrSet.h:94
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition:SmallPtrSet.h:384
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition:SmallPtrSet.h:519
llvm::SmallVectorBase::empty
bool empty() const
Definition:SmallVector.h:81
llvm::SmallVectorBase::size
size_t size() const
Definition:SmallVector.h:78
llvm::SmallVectorImpl::pop_back_val
T pop_back_val()
Definition:SmallVector.h:673
llvm::SmallVectorImpl::clear
void clear()
Definition:SmallVector.h:610
llvm::SmallVectorTemplateBase::push_back
void push_back(const T &Elt)
Definition:SmallVector.h:413
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition:SmallVector.h:1196
llvm::StructType
Class to represent struct types.
Definition:DerivedTypes.h:218
llvm::TargetLibraryAnalysis
Analysis pass providing the TargetLibraryInfo.
Definition:TargetLibraryInfo.h:614
llvm::TargetLibraryInfoWrapperPass
Definition:TargetLibraryInfo.h:639
llvm::TargetLibraryInfo
Provides information about what library functions are available for the current target.
Definition:TargetLibraryInfo.h:280
llvm::TypeSize
Definition:TypeSize.h:334
llvm::TypeSize::getFixed
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition:TypeSize.h:345
llvm::Type::isPointerTy
bool isPointerTy() const
True if this is an instance of PointerType.
Definition:Type.h:264
llvm::Type::isSized
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition:Type.h:310
llvm::Type::getPrimitiveSizeInBits
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
llvm::Use
A Use represents the edge between a Value definition and its users.
Definition:Use.h:43
llvm::User::op_begin
op_iterator op_begin()
Definition:User.h:280
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition:User.h:228
llvm::User::op_end
op_iterator op_end()
Definition:User.h:282
llvm::Value
LLVM Value Representation.
Definition:Value.h:74
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition:Value.h:255
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition:Value.cpp:309
llvm::Value::stripPointerCastsForAliasAnalysis
const Value * stripPointerCastsForAliasAnalysis() const
Strip off pointer casts, all-zero GEPs, single-argument phi nodes and invariant group info.
Definition:Value.cpp:710
llvm::cl::opt
Definition:CommandLine.h:1423
llvm::details::FixedOrScalableQuantity::getFixedValue
constexpr ScalarTy getFixedValue() const
Definition:TypeSize.h:202
llvm::details::FixedOrScalableQuantity< TypeSize, uint64_t >::isKnownLT
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition:TypeSize.h:218
llvm::details::FixedOrScalableQuantity::isScalable
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition:TypeSize.h:171
llvm::details::FixedOrScalableQuantity::getKnownMinValue
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition:TypeSize.h:168
llvm::generic_gep_type_iterator
Definition:GetElementPtrTypeIterator.h:31
llvm::generic_gep_type_iterator::getStructTypeOrNull
StructType * getStructTypeOrNull() const
Definition:GetElementPtrTypeIterator.h:166
llvm::generic_gep_type_iterator::getSequentialElementStride
TypeSize getSequentialElementStride(const DataLayout &DL) const
Definition:GetElementPtrTypeIterator.h:154
llvm::ilist_detail::node_parent_access::getParent
const ParentTy * getParent() const
Definition:ilist_node.h:32
llvm::raw_ostream
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition:raw_ostream.h:52
uint64_t
unsigned
llvm::AArch64CC::LE
@ LE
Definition:AArch64BaseInfo.h:268
llvm::AArch64::RP
@ RP
Definition:AArch64ISelLowering.h:541
llvm::AMDGPU::IsaInfo::TargetIDSetting::Off
@ Off
llvm::APIntOps::umin
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition:APInt.h:2227
llvm::APIntOps::GreatestCommonDivisor
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
Definition:APInt.cpp:771
llvm::COFF::Entry
@ Entry
Definition:COFF.h:844
llvm::M68k::MemAddrModeKind::V
@ V
llvm::NVPTX::PTXLdStInstCode::V2
@ V2
Definition:NVPTX.h:163
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition:PatternMatch.h:49
llvm::PatternMatch::m_VScale
VScaleVal_match m_VScale()
Definition:PatternMatch.h:3010
llvm::SIEncodingFamily::SI
@ SI
Definition:SIDefines.h:36
llvm::cl::Hidden
@ Hidden
Definition:CommandLine.h:137
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition:CommandLine.h:443
llvm::dwarf::Index
Index
Definition:Dwarf.h:882
llvm::lowertypetests::DropTestKind::Assume
@ Assume
Do not drop type tests (default).
llvm::ms_demangle::QualifierMangleMode::Result
@ Result
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition:AddressRanges.h:18
llvm::dump
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
Definition:SparseBitVector.h:877
llvm::Offset
@ Offset
Definition:DWP.cpp:480
llvm::isValidAssumeForContext
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
Definition:ValueTracking.cpp:509
llvm::make_scope_exit
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition:ScopeExit.h:59
llvm::Depth
@ Depth
Definition:SIMachineScheduler.h:36
llvm::getArgumentAliasingToReturnedPointer
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns call pointer argument that is considered the same by aliasing rules.
Definition:ValueTracking.cpp:6699
llvm::enumerate
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition:STLExtras.h:2448
llvm::isPotentiallyReachableFromMany
bool isPotentiallyReachableFromMany(SmallVectorImpl< BasicBlock * > &Worklist, const BasicBlock *StopBB, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether there is at least one path from a block in 'Worklist' to 'StopBB' without passing t...
Definition:CFG.cpp:239
llvm::successors
auto successors(const MachineBasicBlock *BB)
Definition:MachineBasicBlock.h:1376
llvm::isBaseOfObject
bool isBaseOfObject(const Value *V)
Return true if we know V to be the base address of the corresponding memory object.
Definition:AliasAnalysis.cpp:829
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition:STLExtras.h:2115
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Definition:ValueTracking.cpp:6768
llvm::isNonEscapingLocalObject
bool isNonEscapingLocalObject(const Value *V, SmallDenseMap< const Value *, bool, 8 > *IsCapturedCache=nullptr)
Returns true if the pointer is to a function-local object that never escapes from the function.
Definition:CaptureTracking.cpp:458
llvm::computeConstantRange
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Definition:ValueTracking.cpp:10078
llvm::getObjectSize
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
Definition:MemoryBuiltins.cpp:578
llvm::isModSet
bool isModSet(const ModRefInfo MRI)
Definition:ModRef.h:48
llvm::NullPointerIsDefined
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition:Function.cpp:1187
llvm::dbgs
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition:Debug.cpp:163
llvm::getVScaleRange
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Definition:ValueTracking.cpp:1058
llvm::createBasicAAWrapperPass
FunctionPass * createBasicAAWrapperPass()
Definition:BasicAliasAnalysis.cpp:1983
llvm::isMallocOrCallocLikeFn
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory similar to malloc or...
Definition:MemoryBuiltins.cpp:306
llvm::isKnownNonZero
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
Definition:ValueTracking.cpp:3487
llvm::ModRefInfo
ModRefInfo
Flags indicating whether a memory access modifies or references memory.
Definition:ModRef.h:27
llvm::ModRefInfo::Ref
@ Ref
The access may reference the value stored in memory.
llvm::ModRefInfo::ModRef
@ ModRef
The access may reference and may modify the value stored in memory.
llvm::ModRefInfo::Mod
@ Mod
The access may modify the value stored in memory.
llvm::ModRefInfo::NoModRef
@ NoModRef
The access neither references nor modifies the value stored in memory.
llvm::FindEarliestCapture
Instruction * FindEarliestCapture(const Value *V, Function &F, bool ReturnCaptures, bool StoreCaptures, const DominatorTree &DT, unsigned MaxUsesToExplore=0)
Definition:CaptureTracking.cpp:261
llvm::isKnownNonEqual
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
Definition:ValueTracking.cpp:318
llvm::initializeBasicAAWrapperPassPass
void initializeBasicAAWrapperPassPass(PassRegistry &)
llvm::computeKnownBits
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
Definition:ValueTracking.cpp:164
llvm::isModAndRefSet
bool isModAndRefSet(const ModRefInfo MRI)
Definition:ModRef.h:45
llvm::isIdentifiedFunctionLocal
bool isIdentifiedFunctionLocal(const Value *V)
Return true if V is unambiguously identified at the function-level.
Definition:AliasAnalysis.cpp:825
llvm::BitWidth
constexpr unsigned BitWidth
Definition:BitmaskEnum.h:217
llvm::isEscapeSource
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
Definition:AliasAnalysis.cpp:837
llvm::gep_type_begin
gep_type_iterator gep_type_begin(const User *GEP)
Definition:GetElementPtrTypeIterator.h:173
llvm::InlinerFunctionImportStatsOpts::Basic
@ Basic
llvm::isIdentifiedObject
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
Definition:AliasAnalysis.cpp:813
llvm::isPotentiallyReachable
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
Definition:CFG.cpp:281
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition:BitVector.h:860
N
#define N
llvm::BasicAAResult::DecomposedGEP
Definition:BasicAliasAnalysis.cpp:539
llvm::BasicAAResult::DecomposedGEP::Offset
APInt Offset
Definition:BasicAliasAnalysis.cpp:543
llvm::BasicAAResult::DecomposedGEP::dump
void dump() const
Definition:BasicAliasAnalysis.cpp:549
llvm::BasicAAResult::DecomposedGEP::VarIndices
SmallVector< VariableGEPIndex, 4 > VarIndices
Definition:BasicAliasAnalysis.cpp:545
llvm::BasicAAResult::DecomposedGEP::Base
const Value * Base
Definition:BasicAliasAnalysis.cpp:541
llvm::BasicAAResult::DecomposedGEP::print
void print(raw_ostream &OS) const
Definition:BasicAliasAnalysis.cpp:553
llvm::BasicAAResult::DecomposedGEP::NWFlags
GEPNoWrapFlags NWFlags
Definition:BasicAliasAnalysis.cpp:547
llvm::AAQueryInfo::CacheEntry
Definition:AliasAnalysis.h:241
llvm::AAQueryInfo::CacheEntry::Definitive
static constexpr int Definitive
Cache entry is neither an assumption nor does it use a (non-definitive) assumption.
Definition:AliasAnalysis.h:244
llvm::AAQueryInfo::CacheEntry::AssumptionBased
static constexpr int AssumptionBased
Cache entry is not an assumption itself, but may be using an assumption from higher up the stack.
Definition:AliasAnalysis.h:247
llvm::AnalysisKey
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition:Analysis.h:28
llvm::AssumptionCache::ResultElem
Definition:AssumptionCache.h:48
llvm::CaptureAnalysis::~CaptureAnalysis
virtual ~CaptureAnalysis()=0
llvm::CaptureAnalysis::isNotCapturedBefore
virtual bool isNotCapturedBefore(const Value *Object, const Instruction *I, bool OrAt)=0
Check whether Object is not captured before instruction I.
llvm::KnownBits
Definition:KnownBits.h:23
llvm::ObjectSizeOpts
Various options to control the behavior of getObjectSize.
Definition:MemoryBuiltins.h:138
llvm::ObjectSizeOpts::NullIsUnknownSize
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Definition:MemoryBuiltins.h:162
llvm::ObjectSizeOpts::RoundToAlign
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
Definition:MemoryBuiltins.h:158
llvm::OperandBundleUse
A lightweight accessor for an operand bundle meant to be passed around by value.
Definition:InstrTypes.h:1007
llvm::OperandBundleUse::getTagName
StringRef getTagName() const
Return the tag of this operand bundle as a string.
Definition:InstrTypes.h:1026
llvm::OperandBundleUse::Inputs
ArrayRef< Use > Inputs
Definition:InstrTypes.h:1008
llvm::SaveAndRestore
A utility class that uses RAII to save and restore the value of a variable.
Definition:SaveAndRestore.h:23
llvm::SimplifyQuery
Definition:SimplifyQuery.h:70

Generated on Fri Jul 18 2025 10:00:24 for LLVM by doxygen 1.9.6
[8]ページ先頭

©2009-2025 Movatter.jp