1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 7//===----------------------------------------------------------------------===// 9// This file defines the interface for lazy computation of value constraint 12//===----------------------------------------------------------------------===// 45using namespacePatternMatch;
47#define DEBUG_TYPE "lazy-value-info" 49// This is the number of worklist items we will process to try to discover an 50// answer for a given value. 58"Lazy Value Information Analysis",
false,
true)
72/// Returns true if this lattice value represents at most one possible value. 73/// This is as precise as any lattice value can get while still representing 78// Integer constants are single element ranges 81// Non integer constants 86//===----------------------------------------------------------------------===// 87// LazyValueInfoCache Decl 88//===----------------------------------------------------------------------===// 91 /// A callback value handle updates the cache when values are erased. 92classLazyValueInfoCache;
94 LazyValueInfoCache *Parent;
96 LVIValueHandle(
Value *V, LazyValueInfoCache *
P =
nullptr)
99void deleted()
override;
100void allUsesReplacedWith(
Value *V)
override{
104}
// end anonymous namespace 109/// This is the cache kept by LazyValueInfo which 110/// maintains information about queries across the clients' queries. 111classLazyValueInfoCache {
112 /// This is all of the cached information for one basic block. It contains 113 /// the per-value lattice elements, as well as a separate set for 114 /// overdefined values to reduce memory usage. Additionally pointers 115 /// dereferenced in the block are cached for nullability queries. 116structBlockCacheEntry {
119// std::nullopt indicates that the nonnull pointers for this basic block 120// block have not been computed yet. 121 std::optional<NonNullPointerSet> NonNullPointers;
124 /// Cached information per basic block. 127 /// Set of value handles used to erase values from the cache on deletion. 130const BlockCacheEntry *getBlockEntry(
BasicBlock *BB)
const{
131auto It = BlockCache.
find_as(BB);
132if (It == BlockCache.
end())
134return It->second.get();
137 BlockCacheEntry *getOrCreateBlockEntry(
BasicBlock *BB) {
138auto It = BlockCache.
find_as(BB);
139if (It == BlockCache.
end())
140 It = BlockCache.
insert({BB, std::make_unique<BlockCacheEntry>()}).first;
142return It->second.get();
145void addValueHandle(
Value *Val) {
146auto HandleIt = ValueHandles.
find_as(Val);
147if (HandleIt == ValueHandles.
end())
148 ValueHandles.
insert({Val,
this});
154 BlockCacheEntry *
Entry = getOrCreateBlockEntry(BB);
156// Insert over-defined values into their own cache to reduce memory 158if (
Result.isOverdefined())
159Entry->OverDefined.insert(Val);
166 std::optional<ValueLatticeElement> getCachedValueInfo(
Value *V,
168const BlockCacheEntry *
Entry = getBlockEntry(BB);
172if (
Entry->OverDefined.count(V))
175auto LatticeIt =
Entry->LatticeElements.find_as(V);
176if (LatticeIt ==
Entry->LatticeElements.end())
179return LatticeIt->second;
185 BlockCacheEntry *
Entry = getOrCreateBlockEntry(BB);
186if (!
Entry->NonNullPointers) {
187Entry->NonNullPointers = InitFn(BB);
192returnEntry->NonNullPointers->count(V);
195 /// clear - Empty the cache. 198 ValueHandles.
clear();
201 /// Inform the cache that a given value has been deleted. 202void eraseValue(
Value *V);
204 /// This is part of the update interface to inform the cache 205 /// that a block has been deleted. 208 /// Updates the cache to remove any influence an overdefined value in 209 /// OldSucc might have (unless also overdefined in NewSucc). This just 210 /// flushes elements from the cache and does not add any. 215void LazyValueInfoCache::eraseValue(
Value *V) {
216for (
auto &Pair : BlockCache) {
217 Pair.second->LatticeElements.erase(V);
218 Pair.second->OverDefined.erase(V);
219if (Pair.second->NonNullPointers)
220 Pair.second->NonNullPointers->erase(V);
223auto HandleIt = ValueHandles.
find_as(V);
224if (HandleIt != ValueHandles.
end())
225 ValueHandles.
erase(HandleIt);
228void LVIValueHandle::deleted() {
229// This erasure deallocates *this, so it MUST happen after we're done 230// using any and all members of *this. 231 Parent->eraseValue(*
this);
234void LazyValueInfoCache::eraseBlock(
BasicBlock *BB) {
235 BlockCache.erase(BB);
238void LazyValueInfoCache::threadEdgeImpl(
BasicBlock *OldSucc,
240// When an edge in the graph has been threaded, values that we could not 241// determine a value for before (i.e. were marked overdefined) may be 242// possible to solve now. We do NOT try to proactively update these values. 243// Instead, we clear their entries from the cache, and allow lazy updating to 244// recompute them when needed. 246// The updating process is fairly simple: we need to drop cached info 247// for all values that were marked overdefined in OldSucc, and for those same 248// values in any successor of OldSucc (except NewSucc) in which they were 249// also marked overdefined. 250 std::vector<BasicBlock*> worklist;
251 worklist.push_back(OldSucc);
253const BlockCacheEntry *
Entry = getBlockEntry(OldSucc);
254if (!Entry ||
Entry->OverDefined.empty())
255return;
// Nothing to process here. 257Entry->OverDefined.end());
259// Use a worklist to perform a depth-first search of OldSucc's successors. 260// NOTE: We do not need a visited list since any blocks we have already 261// visited will have had their overdefined markers cleared already, and we 262// thus won't loop to their successors. 263while (!worklist.empty()) {
267// Skip blocks only accessible through NewSucc. 268if (ToUpdate == NewSucc)
continue;
270// If a value was marked overdefined in OldSucc, and is here too... 271auto OI = BlockCache.find_as(ToUpdate);
272if (OI == BlockCache.end() || OI->second->OverDefined.empty())
274auto &ValueSet = OI->second->OverDefined;
277for (
Value *V : ValsToClear) {
278if (!ValueSet.erase(V))
281// If we removed anything, then we potentially need to update 282// blocks successors too. 286if (!changed)
continue;
294/// An assembly annotator class to print LazyValueCache information in 298// While analyzing which blocks we can solve values for, we need the dominator 304 : LVIImpl(
L), DT(DTree) {}
306void emitBasicBlockStartAnnot(
constBasicBlock *BB,
313// The actual implementation of the lazy analysis and update. 316 /// Cached results from previous queries 317 LazyValueInfoCache TheCache;
319 /// This stack holds the state of the value solver during a query. 320 /// It basically emulates the callstack of the naive 321 /// recursive value lookup process. 324 /// Keeps track of which block-value pairs are in BlockValueStack. 327 /// Push BV onto BlockValueStack unless it's already in there. 328 /// Returns true on success. 329bool pushBlockValue(
const std::pair<BasicBlock *, Value *> &BV) {
330if (!BlockValueSet.
insert(BV).second)
331returnfalse;
// It's already in the stack. 334 << BV.first->getName() <<
"\n");
340constDataLayout &DL;
///< A mandatory DataLayout 342 /// Declaration of the llvm.experimental.guard() intrinsic, 343 /// if it exists in the module. 346 std::optional<ValueLatticeElement> getBlockValue(
Value *Val,
BasicBlock *BB,
352// These methods process one work item and may add more. A false value 353// returned means that the work item was not completely processed and must 354// be revisited after going through the new items. 356 std::optional<ValueLatticeElement> solveBlockValueImpl(
Value *Val,
358 std::optional<ValueLatticeElement> solveBlockValueNonLocal(
Value *Val,
360 std::optional<ValueLatticeElement> solveBlockValuePHINode(
PHINode *PN,
362 std::optional<ValueLatticeElement> solveBlockValueSelect(
SelectInst *S,
366 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
370 std::optional<ValueLatticeElement>
372 std::optional<ValueLatticeElement> solveBlockValueCast(
CastInst *CI,
374 std::optional<ValueLatticeElement>
376 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(
IntrinsicInst *
II,
378 std::optional<ValueLatticeElement>
380 std::optional<ValueLatticeElement>
383void intersectAssumeOrGuardBlockValueConstantRange(
Value *Val,
389// For the following methods, if UseBlockValue is true, the function may 390// push additional values to the worklist and return nullopt. If 391// UseBlockValue is false, it will never return nullopt. 393 std::optional<ValueLatticeElement>
398 std::optional<ValueLatticeElement>
399 getValueFromICmpCondition(
Value *Val,
ICmpInst *ICI,
bool isTrueDest,
402 std::optional<ValueLatticeElement>
404bool UseBlockValue,
unsignedDepth = 0);
406 std::optional<ValueLatticeElement> getEdgeValueLocal(
Value *Val,
412 /// This is the query interface to determine the lattice value for the 413 /// specified Value* at the context instruction (if specified) or at the 414 /// start of the block. 418 /// This is the query interface to determine the lattice value for the 419 /// specified Value* at the specified instruction using only information 420 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no 421 /// recursive query is performed. 424 /// This is the query interface to determine the lattice 425 /// value for the specified Value* that is true on the specified edge. 432 /// Complete flush all previously computed values 437 /// Printing the LazyValueInfo Analysis. 439 LazyValueInfoAnnotatedWriter Writer(
this, DTree);
443 /// This is part of the update interface to remove information related to this 444 /// value from the cache. 447 /// This is part of the update interface to inform the cache 448 /// that a block has been deleted. 450 TheCache.eraseBlock(BB);
453 /// This is the update interface to inform the cache that an edge from 454 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc. 459 : AC(AC),
DL(
DL), GuardDecl(GuardDecl) {}
463void LazyValueInfoImpl::solve() {
467unsigned processedCount = 0;
468while (!BlockValueStack.empty()) {
470// Abort if we have to process too many values to get a result for this one. 471// Because of the design of the overdefined cache currently being per-block 472// to avoid naming-related issues (IE it wants to try to give different 473// results for the same name in different blocks), overdefined results don't 474// get cached globally, which in turn means we will often try to rediscover 475// the same overdefined result again and again. Once something like 476// PredicateInfo is used in LVI or CVP, we should be able to make the 477// overdefined cache global, and remove this throttle. 480dbgs() <<
"Giving up on stack because we are getting too deep\n");
481// Fill in the original values 482while (!StartingStack.
empty()) {
483 std::pair<BasicBlock *, Value *> &
e = StartingStack.
back();
484 TheCache.insertResult(
e.second,
e.first,
488 BlockValueSet.clear();
489 BlockValueStack.clear();
492 std::pair<BasicBlock *, Value *>
e = BlockValueStack.back();
493assert(BlockValueSet.count(e) &&
"Stack value should be in BlockValueSet!");
494unsigned StackSize = BlockValueStack.size();
497if (solveBlockValue(
e.second,
e.first)) {
498// The work item was completely processed. 499assert(BlockValueStack.size() == StackSize &&
500 BlockValueStack.back() == e &&
"Nothing should have been pushed!");
502 std::optional<ValueLatticeElement> BBLV =
503 TheCache.getCachedValueInfo(
e.second,
e.first);
504assert(BBLV &&
"Result should be in cache!");
506dbgs() <<
"POP " << *
e.second <<
" in " <<
e.first->getName() <<
" = " 510 BlockValueStack.pop_back();
511 BlockValueSet.erase(e);
513// More work needs to be done before revisiting. 514assert(BlockValueStack.size() == StackSize + 1 &&
515"Exactly one element should have been pushed!");
520std::optional<ValueLatticeElement>
523// If already a constant, there is nothing to compute. 524if (
Constant *VC = dyn_cast<Constant>(Val))
527if (std::optional<ValueLatticeElement> OptLatticeVal =
528 TheCache.getCachedValueInfo(Val, BB)) {
529 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
533// We have hit a cycle, assume overdefined. 534if (!pushBlockValue({ BB, Val }))
537// Yet to be resolved. 545case Instruction::Call:
546case Instruction::Invoke:
547if (std::optional<ConstantRange>
Range = cast<CallBase>(BBI)->
getRange())
550case Instruction::Load:
552if (isa<IntegerType>(BBI->
getType())) {
558// Nothing known - will be intersected with other facts 563assert(!isa<Constant>(Val) &&
"Value should not be constant");
564assert(!TheCache.getCachedValueInfo(Val, BB) &&
565"Value should not be in cache");
567// Hold off inserting this value into the Cache in case we have to return 568// false and come back later. 569 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
571// Work pushed, will revisit 574 TheCache.insertResult(Val, BB, *Res);
578std::optional<ValueLatticeElement>
582return solveBlockValueNonLocal(Val, BB);
584if (
PHINode *PN = dyn_cast<PHINode>(BBI))
585return solveBlockValuePHINode(PN, BB);
587if (
auto *SI = dyn_cast<SelectInst>(BBI))
588return solveBlockValueSelect(SI, BB);
590// If this value is a nonnull pointer, record it's range and bailout. Note 591// that for all other pointer typed values, we terminate the search at the 592// definition. We could easily extend this to look through geps, bitcasts, 593// and the like to prove non-nullness, but it's not clear that's worth it 594// compile time wise. The context-insensitive value walk done inside 595// isKnownNonZero gets most of the profitable cases at much less expense. 596// This does mean that we have a sensitivity to where the defining 597// instruction is placed, even if it could legally be hoisted much higher. 598// That is unfortunate. 604if (
auto *CI = dyn_cast<CastInst>(BBI))
605return solveBlockValueCast(CI, BB);
608return solveBlockValueBinaryOp(BO, BB);
610if (
auto *IEI = dyn_cast<InsertElementInst>(BBI))
611return solveBlockValueInsertElement(IEI, BB);
613if (
auto *EVI = dyn_cast<ExtractValueInst>(BBI))
614return solveBlockValueExtractValue(EVI, BB);
616if (
auto *
II = dyn_cast<IntrinsicInst>(BBI))
617return solveBlockValueIntrinsic(
II, BB);
621 <<
"' - unknown inst def found.\n");
626// TODO: Use NullPointerIsDefined instead. 627if (
Ptr->getType()->getPointerAddressSpace() == 0)
633if (
LoadInst *L = dyn_cast<LoadInst>(
I)) {
635 }
elseif (
StoreInst *S = dyn_cast<StoreInst>(
I)) {
638if (
MI->isVolatile())
return;
640// FIXME: check whether it has a valuerange that excludes zero? 642if (!Len || Len->isZero())
return;
650bool LazyValueInfoImpl::isNonNullAtEndOfBlock(
Value *Val,
BasicBlock *BB) {
656return TheCache.isNonNullAtEndOfBlock(Val, BB, [](
BasicBlock *BB) {
657 NonNullPointerSet NonNullPointers;
660return NonNullPointers;
664std::optional<ValueLatticeElement>
668// If this is the entry block, we must be asking about an argument. 670assert(isa<Argument>(Val) &&
"Unknown live-in to the entry block");
671if (std::optional<ConstantRange>
Range = cast<Argument>(Val)->
getRange())
676// Loop over all of our predecessors, merging what we know from them into 677// result. If we encounter an unexplored predecessor, we eagerly explore it 678// in a depth first manner. In practice, this has the effect of discovering 679// paths we can't analyze eagerly without spending compile times analyzing 680// other paths. This heuristic benefits from the fact that predecessors are 681// frequently arranged such that dominating ones come first and we quickly 682// find a path to function entry. TODO: We should consider explicitly 683// canonicalizing to make this true rather than relying on this happy 686 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
688// Explore that input, then return here 691Result.mergeIn(*EdgeResult);
693// If we hit overdefined, exit early. The BlockVals entry is already set 695if (
Result.isOverdefined()) {
697 <<
"' - overdefined because of pred '" 698 << Pred->getName() <<
"' (non local).\n");
703// Return the merged value, which is more precise than 'overdefined'. 708std::optional<ValueLatticeElement>
712// Loop over all of our predecessors, merging what we know from them into 713// result. See the comment about the chosen traversal order in 714// solveBlockValueNonLocal; the same reasoning applies here. 718// Note that we can provide PN as the context value to getEdgeValue, even 719// though the results will be cached, because PN is the value being used as 720// the cache key in the caller. 721 std::optional<ValueLatticeElement> EdgeResult =
722 getEdgeValue(PhiVal, PhiBB, BB, PN);
724// Explore that input, then return here 727Result.mergeIn(*EdgeResult);
729// If we hit overdefined, exit early. The BlockVals entry is already set 731if (
Result.isOverdefined()) {
733 <<
"' - overdefined because of pred (local).\n");
739// Return the merged value, which is more precise than 'overdefined'. 740assert(!
Result.isOverdefined() &&
"Possible PHI in entry block?");
744// If we can determine a constraint on the value given conditions assumed by 745// the program, intersect those constraints with BBLV 746void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
748 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
757// Only check assumes in the block of the context instruction. Other 758// assumes will have already been taken into account when the value was 759// propagated from predecessor blocks. 760auto *
I = cast<CallInst>(AssumeVH);
764 BBLV = BBLV.
intersect(*getValueFromCondition(Val,
I->getArgOperand(0),
766/*UseBlockValue*/false));
769// If guards are not used in the module, don't spend time looking for them 770if (GuardDecl && !GuardDecl->
use_empty() &&
778/*UseBlockValue*/false));
783// Check whether we're checking at the terminator, and the pointer has 784// been dereferenced in this block. 787 isNonNullAtEndOfBlock(Val, BB))
792std::optional<ValueLatticeElement>
794// Recurse on our inputs if needed 795 std::optional<ValueLatticeElement> OptTrueVal =
796 getBlockValue(
SI->getTrueValue(), BB, SI);
801 std::optional<ValueLatticeElement> OptFalseVal =
802 getBlockValue(
SI->getFalseValue(), BB, SI);
813// Is this a min specifically of our two inputs? (Avoid the risk of 814// ValueTracking getting smarter looking back past our immediate inputs.) 816 ((LHS ==
SI->getTrueValue() && RHS ==
SI->getFalseValue()) ||
817 (RHS ==
SI->getTrueValue() && LHS ==
SI->getFalseValue()))) {
823return TrueCR.
smin(FalseCR);
825return TrueCR.
umin(FalseCR);
827return TrueCR.
smax(FalseCR);
829return TrueCR.
umax(FalseCR);
833 ResultCR,
TrueVal.isConstantRangeIncludingUndef() ||
834FalseVal.isConstantRangeIncludingUndef());
838if (LHS ==
SI->getTrueValue())
840 TrueCR.
abs(),
TrueVal.isConstantRangeIncludingUndef());
841if (LHS ==
SI->getFalseValue())
843 FalseCR.
abs(),
FalseVal.isConstantRangeIncludingUndef());
848if (LHS ==
SI->getTrueValue())
851if (LHS ==
SI->getFalseValue())
857// Can we constrain the facts about the true and false values by using the 858// condition itself? This shows up with idioms like e.g. select(a > 5, a, 5). 859// TODO: We could potentially refine an overdefined true value above. 861// If the value is undef, a different value may be chosen in 862// the select condition. 865TrueVal.intersect(*getValueFromCondition(
SI->getTrueValue(),
Cond,
867/*UseBlockValue*/false));
869FalseVal.intersect(*getValueFromCondition(
SI->getFalseValue(),
Cond,
871/*UseBlockValue*/false));
879std::optional<ConstantRange>
881 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
884return OptVal->asConstantRange(
V->getType());
887std::optional<ValueLatticeElement>
889// Filter out casts we don't know how to reason about before attempting to 890// recurse on our operand. This can cut a long search short if we know we're 891// not going to be able to get any useful information anways. 893case Instruction::Trunc:
894case Instruction::SExt:
895case Instruction::ZExt:
898// Unhandled instructions are overdefined. 900 <<
"' - overdefined (unknown cast).\n");
904// Figure out the range of the LHS. If that fails, we still apply the 905// transfer rule on the full set since we may be able to locally infer 907 std::optional<ConstantRange> LHSRes = getRangeFor(CI->
getOperand(0), CI, BB);
909// More work to do before applying this transfer rule. 915// NOTE: We're currently limited by the set of operations that ConstantRange 916// can evaluate symbolically. Enhancing that set will allows us to analyze 922std::optional<ValueLatticeElement>
923LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
930auto ThreadBinOpOverSelect =
932bool XIsLHS) -> std::optional<ValueLatticeElement> {
934// Only handle selects with constant values. 935Constant *TrueC = dyn_cast<Constant>(
Y->getTrueValue());
938Constant *FalseC = dyn_cast<Constant>(
Y->getFalseValue());
946/*UseBlockValue=*/false)
947 ->asConstantRange(
X->getType()));
950/*UseBlockValue=*/false)
951 ->asConstantRange(
X->getType()));
957 OpFn(TrueX, TrueY).unionWith(OpFn(FalseX, FalseY)));
959 OpFn(TrueY, TrueX).unionWith(OpFn(FalseY, FalseX)));
962// Figure out the ranges of the operands. If that fails, use a 963// conservative range, but apply the transfer rule anyways. This 964// lets us pick up facts from expressions like "and i32 (call i32 966 std::optional<ConstantRange> LHSRes = getRangeFor(LHS,
I, BB);
970// Try to thread binop over rhs select 971if (
auto *SI = dyn_cast<SelectInst>(RHS)) {
972if (
auto Res = ThreadBinOpOverSelect(LHS, *LHSRes, SI,
/*XIsLHS=*/true))
976 std::optional<ConstantRange> RHSRes = getRangeFor(RHS,
I, BB);
980// Try to thread binop over lhs select 981if (
auto *SI = dyn_cast<SelectInst>(LHS)) {
982if (
auto Res = ThreadBinOpOverSelect(RHS, *RHSRes, SI,
/*XIsLHS=*/false))
991std::optional<ValueLatticeElement>
994"all operands to binary operators are sized");
995if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
996unsigned NoWrapKind = OBO->getNoWrapKind();
997return solveBlockValueBinaryOpImpl(
1004return solveBlockValueBinaryOpImpl(
1010std::optional<ValueLatticeElement>
1013return solveBlockValueBinaryOpImpl(
1019std::optional<ValueLatticeElement>
1024 <<
"' - unknown intrinsic.\n");
1030 std::optional<ConstantRange>
Range = getRangeFor(
Op,
II, BB);
1041std::optional<ValueLatticeElement>
1044 std::optional<ValueLatticeElement> OptEltVal =
1050 std::optional<ValueLatticeElement> OptVecVal =
1055// Bail out if the inserted element is a constant expression. Unlike other 1056// ValueLattice types, these are not considered an implicit splat when a 1057// vector type is used. 1058// We could call ConstantFoldInsertElementInstruction here to handle these. 1059if (OptEltVal->isConstant())
1066std::optional<ValueLatticeElement>
1071return solveBlockValueOverflowIntrinsic(WO, BB);
1073// Handle extractvalue of insertvalue to allow further simplification 1074// based on replaced with.overflow intrinsics. 1078return getBlockValue(V, BB, EVI);
1081 <<
"' - overdefined (unknown extractvalue).\n");
1090// Handle range checking idiom produced by InstCombine. We will subtract the 1091// offset from the allowed range for RHS in this case. 1098// Handle the symmetric case. This appears in saturation patterns like 1099// (x == 16) ? 16 : (x + 1). 1105// If (x | y) < C, then (x < C) && (y < C). 1110// If (x & y) > C, then (x > C) && (y > C). 1118/// Get value range for a "(Val + Offset) Pred RHS" condition. 1119std::optional<ValueLatticeElement>
1124bool UseBlockValue) {
1127if (
ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1129 }
elseif (UseBlockValue) {
1130 std::optional<ValueLatticeElement>
R =
1131 getBlockValue(RHS, CxtI->
getParent(), CxtI);
1142static std::optional<ConstantRange>
1152if (
RHS.isMaxSignedValue())
1153return std::nullopt;
// Could also return full/empty here, if we wanted. 1157if (
auto CR = Fn(
RHS))
1158return Invert ? CR->inverse() : CR;
1162/// Get value range for a "ctpop(Val) Pred RHS" condition. 1167auto *RHSConst = dyn_cast<ConstantInt>(
RHS);
1183std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1184Value *Val,
ICmpInst *ICI,
bool isTrueDest,
bool UseBlockValue) {
1188// Get the predicate that must hold along the considered edge. 1192if (isa<Constant>(RHS)) {
1196elseif (!isa<UndefValue>(RHS))
1208return getValueFromSimpleICmpCondition(EdgePred, RHS,
Offset, ICI,
1213return getValueFromSimpleICmpCondition(SwappedPred, LHS,
Offset, ICI,
1222// If (Val & Mask) == C then all the masked bits are known and we can 1223// compute a value range based on that. 1237// If (X urem Modulus) >= C, then X >= C. 1238// If trunc X >= C, then X >= C. 1239// TODO: An upper bound could be computed as well. 1243// Use the icmp region so we don't have to deal with different predicates. 1251// icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC 1252// Preconditions: (C << ShAmtC) >> ShAmtC == C 1258 EdgePred, *
C, [&](
constAPInt &RHS) -> std::optional<ConstantRange> {
1260if ((
New.ashr(*ShAmtC)) != RHS)
1269// a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b 1272// Peek through ptrtoints 1275if ((
X == LHS &&
Y == RHS) || (
X == RHS &&
Y == LHS)) {
1286// Handle conditions of the form 1287// extractvalue(op.with.overflow(%x, C), 1). 1290// TODO: This only works with a constant RHS for now. We could also compute 1291// the range of the RHS, but this doesn't fit into the current structure of 1292// the edge value calculation. 1297// Calculate the possible values of %x for which no overflow occurs. 1301// If overflow is false, %x is constrained to NWR. If overflow is true, %x is 1302// constrained to it's inverse (all values that might cause overflow). 1308std::optional<ValueLatticeElement>
1310bool IsTrueDest,
bool UseBlockValue,
1313return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1315if (
auto *EVI = dyn_cast<ExtractValueInst>(
Cond))
1325return getValueFromCondition(Val,
N, !IsTrueDest, UseBlockValue,
Depth);
1336 std::optional<ValueLatticeElement> LV =
1337 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue,
Depth);
1340 std::optional<ValueLatticeElement> RV =
1341 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue,
Depth);
1345// if (L && R) -> intersect L and R 1346// if (!(L || R)) -> intersect !L and !R 1347// if (L || R) -> union L and R 1348// if (!(L && R)) -> union !L and !R 1349if (IsTrueDest ^ IsAnd) {
1354return LV->intersect(*RV);
1357// Return true if Usr has Op as an operand, otherwise false. 1362// Return true if the instruction type of Val is supported by 1363// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only. 1364// Call this before calling constantFoldUser() to find out if it's even worth 1365// attempting to call it. 1367return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1370// Check if Usr can be simplified to an integer constant when the value of one 1371// of its operands Op is an integer constant OpConstVal. If so, return it as an 1372// lattice value range with a single element or otherwise return an overdefined 1375constAPInt &OpConstVal,
1379// Check if Usr can be simplified to a constant. 1380if (
auto *CI = dyn_cast<CastInst>(Usr)) {
1382if (
auto *
C = dyn_cast_or_null<ConstantInt>(
1387 }
elseif (
auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1390assert((Op0Match || Op1Match) &&
1391"Operand 0 nor Operand 1 isn't a match");
1394if (
auto *
C = dyn_cast_or_null<ConstantInt>(
1398 }
elseif (isa<FreezeInst>(Usr)) {
1399assert(cast<FreezeInst>(Usr)->getOperand(0) ==
Op &&
"Operand 0 isn't Op");
1405/// Compute the value of Val on the edge BBFrom -> BBTo. 1406std::optional<ValueLatticeElement>
1409// TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we 1412// If this is a conditional branch and only one successor goes to BBTo, then 1413// we may be able to infer something from the condition. 1414if (BI->isConditional() &&
1415 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1416bool isTrueDest = BI->getSuccessor(0) == BBTo;
1417assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1418"BBTo isn't a successor of BBFrom");
1419Value *Condition = BI->getCondition();
1421// If V is the condition of the branch itself, then we know exactly what 1423// NB: The condition on a `br` can't be a vector type. 1424if (Condition == Val)
1428// If the condition of the branch is an equality comparison, we may be 1429// able to infer the value. 1430 std::optional<ValueLatticeElement>
Result =
1431 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1435if (!
Result->isOverdefined())
1438if (
User *Usr = dyn_cast<User>(Val)) {
1439assert(
Result->isOverdefined() &&
"Result isn't overdefined");
1440// Check with isOperationFoldable() first to avoid linearly iterating 1441// over the operands unnecessarily which can be expensive for 1442// instructions with many operands. 1446// If Val has Condition as an operand and Val can be folded into a 1447// constant with either Condition == true or Condition == false, 1448// propagate the constant. 1450// ; %Val is true on the edge to %then. 1451// %Val = and i1 %Condition, true. 1452// br %Condition, label %then, label %else 1453APInt ConditionVal(1, isTrueDest ? 1 : 0);
1456// If one of Val's operand has an inferred value, we may be able to 1457// infer the value of Val. 1459// ; %Val is 94 on the edge to %then. 1460// %Val = add i8 %Op, 1 1461// %Condition = icmp eq i8 %Op, 93 1462// br i1 %Condition, label %then, label %else 1463for (
unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1464Value *
Op = Usr->getOperand(i);
1466Op, Condition, isTrueDest,
/*UseBlockValue*/false);
1467if (std::optional<APInt> OpConst =
1476if (!
Result->isOverdefined())
1481// If the edge was formed by a switch on the value, then we may know exactly 1484Value *Condition =
SI->getCondition();
1485if (!isa<IntegerType>(Val->
getType()))
1487bool ValUsesConditionAndMayBeFoldable =
false;
1488if (Condition != Val) {
1489// Check if Val has Condition as an operand. 1490if (
User *Usr = dyn_cast<User>(Val))
1493if (!ValUsesConditionAndMayBeFoldable)
1496assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1497"Condition != Val nor Val doesn't use Condition");
1499bool DefaultCase =
SI->getDefaultDest() == BBTo;
1503for (
auto Case :
SI->cases()) {
1504APInt CaseValue = Case.getCaseValue()->getValue();
1506if (ValUsesConditionAndMayBeFoldable) {
1507User *Usr = cast<User>(Val);
1516// It is possible that the default destination is the destination of 1517// some cases. We cannot perform difference for those cases. 1518// We know Condition != CaseValue in BBTo. In some cases we can use 1519// this to infer Val == f(Condition) is != f(CaseValue). For now, we 1520// only do this when f is identity (i.e. Val == Condition), but we 1521// should be able to do this for any injective f. 1522if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1524 }
elseif (Case.getCaseSuccessor() == BBTo)
1525 EdgesVals = EdgesVals.
unionWith(EdgeVal);
1532/// Compute the value of Val on the edge BBFrom -> BBTo or the value at 1533/// the basic block if the edge does not constrain Val. 1534std::optional<ValueLatticeElement>
1537// If already a constant, there is nothing to compute. 1538if (
Constant *VC = dyn_cast<Constant>(Val))
1541 std::optional<ValueLatticeElement> LocalResult =
1542 getEdgeValueLocal(Val, BBFrom, BBTo,
/*UseBlockValue*/true);
1547// Can't get any more precise here 1550 std::optional<ValueLatticeElement> OptInBlock =
1556// We can use the context instruction (generically the ultimate instruction 1557// the calling pass is trying to simplify) here, even though the result of 1558// this function is generally cached when called from the solve* functions 1559// (and that cached result might be used with queries using a different 1560// context instruction), because when this function is called from the solve* 1561// functions, the context instruction is not provided. When called from 1562// LazyValueInfoImpl::getValueOnEdge, the context instruction is provided, 1563// but then the result is not cached. 1564 intersectAssumeOrGuardBlockValueConstantRange(Val,
InBlock, CxtI);
1566return LocalResult->intersect(
InBlock);
1571LLVM_DEBUG(
dbgs() <<
"LVI Getting block end value " << *V <<
" at '" 1574assert(BlockValueStack.empty() && BlockValueSet.empty());
1575 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1578 OptResult = getBlockValue(V, BB, CxtI);
1579assert(OptResult &&
"Value not available after solving");
1591if (
auto *
C = dyn_cast<Constant>(V))
1595if (
auto *
I = dyn_cast<Instruction>(V))
1597 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1610 std::optional<ValueLatticeElement> Result =
1611 getEdgeValue(V, FromBB, ToBB, CxtI);
1613// As the worklist only explicitly tracks block values (but not edge values) 1614// we may have to call solve() multiple times, as the edge value calculation 1615// may request additional block values. 1617 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1626auto *CxtI = cast<Instruction>(U.getUser());
1629// Check whether the only (possibly transitive) use of the value is in a 1630// position where V can be constrained by a select or branch condition. 1631constUse *CurrU = &U;
1632// TODO: Increase limit? 1633constunsigned MaxUsesToInspect = 3;
1634for (
unsignedI = 0;
I < MaxUsesToInspect; ++
I) {
1635 std::optional<ValueLatticeElement> CondVal;
1636auto *CurrI = cast<Instruction>(CurrU->getUser());
1637if (
auto *SI = dyn_cast<SelectInst>(CurrI)) {
1638// If the value is undef, a different value may be chosen in 1639// the select condition and at use. 1642if (CurrU->getOperandNo() == 1)
1644 *getValueFromCondition(V, SI->getCondition(),
/*IsTrueDest*/true,
1645/*UseBlockValue*/false);
1646elseif (CurrU->getOperandNo() == 2)
1648 *getValueFromCondition(V, SI->getCondition(),
/*IsTrueDest*/false,
1649/*UseBlockValue*/false);
1650 }
elseif (
auto *
PHI = dyn_cast<PHINode>(CurrI)) {
1651// TODO: Use non-local query? 1652 CondVal = *getEdgeValueLocal(V,
PHI->getIncomingBlock(*CurrU),
1653PHI->getParent(),
/*UseBlockValue*/false);
1658// Only follow one-use chain, to allow direct intersection of conditions. 1659// If there are multiple uses, we would have to intersect with the union of 1660// all conditions at different uses. 1661// Stop walking if we hit a non-speculatable instruction. Even if the 1662// result is only used under a specific condition, executing the 1663// instruction itself may cause side effects or UB already. 1664// This also disallows looking through phi nodes: If the phi node is part 1665// of a cycle, we might end up reasoning about values from different cycle 1666// iterations (PR60629). 1667if (!CurrI->hasOneUse() ||
1677 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1680//===----------------------------------------------------------------------===// 1681// LazyValueInfo Impl 1682//===----------------------------------------------------------------------===// 1685 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
1687if (
auto *Impl = Info.getImpl())
1702/// This lazily constructs the LazyValueInfoImpl. 1705assert(M &&
"getCache() called with a null Module");
1723// If the cache was allocated, free it. 1724if (
auto *Impl = getImpl()) {
1732// We need to invalidate if we have either failed to preserve this analyses 1733// result directly or if any of its dependencies have been invalidated. 1750/// Returns true if we can statically tell that this value will never be a 1751/// "useful" constant. In practice, this means we've got something like an 1752/// alloca or a malloc call for which a comparison against a constant can 1753/// only be guarding dead code. Note that we are potentially giving up some 1754/// precision in dead code (a constant result) in favour of avoiding a 1755/// expensive search for a easily answered common query. 1757 V = V->stripPointerCasts();
1758// The return val of alloc cannot be a Constant. 1759if (isa<AllocaInst>(V))
1765// Bail out early if V is known not to be a Constant. 1773if (Result.isConstant())
1774return Result.getConstant();
1775if (Result.isConstantRange()) {
1778return ConstantInt::get(V->getType(), *SingleVal);
1788return Result.asConstantRange(V->getType(), UndefAllowed);
1793auto *Inst = cast<Instruction>(U.getUser());
1796return Result.asConstantRange(U->getType(), UndefAllowed);
1799/// Determine whether the specified value is known to be a 1800/// constant on the specified edge. Return null if not. 1808if (Result.isConstant())
1809return Result.getConstant();
1810if (Result.isConstantRange()) {
1813return ConstantInt::get(V->getType(), *SingleVal);
1825// TODO: Should undef be allowed here? 1826return Result.asConstantRange(V->getType(),
/*UndefAllowed*/true);
1832// If we know the value is a constant, evaluate the conditional. 1848// If this is an equality comparison, we can try to fold it knowing that 1851// !C1 == C -> false iff C1 == C. 1857// !C1 != C -> true iff C1 == C. 1869/// Determine whether the specified value comparison with a constant is known to 1870/// be true or false on the specified CFG edge. Pred is a CmpInst predicate. 1884bool UseBlockValue) {
1885// Is or is not NonNull are common predicates being queried. If 1886// isKnownNonZero can tell us the result of the predicate, we can 1887// return it quickly. But this is only a fastpath, and falling 1888// through would still be correct. 1891if (V->getType()->isPointerTy() &&
C->isNullValue() &&
1900auto &Impl = getOrCreateImpl(M);
1902 UseBlockValue ? Impl.getValueInBlock(V, CxtI->
getParent(), CxtI)
1903 : Impl.getValueAt(V, CxtI);
1908// Note: The following bit of code is somewhat distinct from the rest of LVI; 1909// LVI as a whole tries to compute a lattice value which is conservatively 1910// correct at a given location. In this case, we have a predicate which we 1911// weren't able to prove about the merged result, and we're pushing that 1912// predicate back along each incoming edge to see if we can prove it 1913// separately for each input. As a motivating example, consider: 1915// %v1 = ... ; constantrange<1, 5> 1918// %v2 = ... ; constantrange<10, 20> 1921// %phi = phi [%v1, %v2] ; constantrange<1,20> 1922// %pred = icmp eq i32 %phi, 8 1923// We can't tell from the lattice value for '%phi' that '%pred' is false 1924// along each path, but by checking the predicate over each input separately, 1926// We limit the search to one step backwards from the current BB and value. 1927// We could consider extending this to search further backwards through the 1928// CFG and/or value graph, but there are non-obvious compile time vs quality 1932// Function entry or an unreachable block. Bail to avoid confusing 1938// If V is a PHI node in the same block as the context, we need to ask 1939// questions about the predicate as applied to the incoming value along 1940// each edge. This is useful for eliminating cases where the predicate is 1941// known along all incoming edges. 1942if (
auto *
PHI = dyn_cast<PHINode>(V))
1943if (
PHI->getParent() == BB) {
1945for (
unsigned i = 0, e =
PHI->getNumIncomingValues(); i < e; i++) {
1948// Note that PredBB may be BB itself. 1952// Keep going as long as we've seen a consistent known result for 1954 Baseline = (i == 0) ? Result
/* First iteration */ 1955 : (Baseline == Result ? Baseline
1956 :
nullptr);
/* All others */ 1964// For a comparison where the V is outside this block, it's possible 1965// that we've branched on it before. Look to see if the value is known 1966// on all incoming edges. 1967if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1968// For predecessor edge, determine if the comparison is true or false 1969// on that edge. If they're all true or all false, we can conclude 1970// the value of the comparison in this block. 1973// Check that all remaining incoming values match the first one. 1979// If we terminated early, then one of the values didn't match. 1991bool UseBlockValue) {
1992if (
auto *
C = dyn_cast<Constant>(
RHS))
1994if (
auto *
C = dyn_cast<Constant>(
LHS))
1998// Got two non-Constant values. Try to determine the comparison results based 1999// on the block values of the two operands, e.g. because they have 2000// non-overlapping ranges. 2005if (L.isOverdefined())
2011return L.getCompare(Pred, Ty, R, M->getDataLayout());
2018if (
auto *Impl = getImpl())
2019 Impl->threadEdge(PredBB, OldSucc, NewSucc);
2023if (
auto *Impl = getImpl())
2024 Impl->forgetValue(V);
2028if (
auto *Impl = getImpl())
2029 Impl->eraseBlock(BB);
2033if (
auto *Impl = getImpl())
2038if (
auto *Impl = getImpl())
2039 Impl->printLVI(
F, DTree,
OS);
2042// Print the LVI for the function arguments at the start of each basic block. 2043void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2045// Find if there are latticevalues defined for arguments of the function. 2047for (
constauto &Arg :
F->args()) {
2050if (Result.isUnknown())
2052OS <<
"; LatticeVal for: '" << Arg <<
"' is: " << Result <<
"\n";
2056// This function prints the LVI analysis for the instruction I at the beginning 2057// of various basic blocks. It relies on calculated values that are stored in 2058// the LazyValueInfoCache, and in the absence of cached values, recalculate the 2059// LazyValueInfo for `I`, and print that info. 2060void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2063auto *ParentBB =
I->getParent();
2065// We can generate (solve) LVI values only for blocks that are dominated by 2066// the I's parent. However, to avoid generating LVI for all dominating blocks, 2067// that contain redundant/uninteresting information, we print LVI for 2068// blocks that may use this LVI information (such as immediate successor 2069// blocks, and blocks that contain uses of `I`). 2071if (!BlocksContainingLVI.
insert(BB).second)
2075OS <<
"; LatticeVal for: '" << *
I <<
"' in BB: '";
2080 printResult(ParentBB);
2081// Print the LVI analysis results for the immediate successor blocks, that 2082// are dominated by `ParentBB`. 2085 printResult(BBSucc);
2087// Print LVI in blocks where `I` is used. 2088for (
constauto *U :
I->users())
2089if (
auto *UseI = dyn_cast<Instruction>(U))
2090if (!isa<PHINode>(UseI) || DT.
dominates(ParentBB, UseI->getParent()))
2091 printResult(UseI->getParent());
2097OS <<
"LVI for function '" <<
F.getName() <<
"':\n";
2100 LVI.printLVI(
F, DTree,
OS);
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
block Block Frequency Analysis
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Given that RA is a live value
This file defines the DenseSet and SmallDenseSet classes.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static bool isOperationFoldable(User *Usr)
static void AddNonNullPointersByInstruction(Instruction *I, NonNullPointerSet &PtrSet)
static std::optional< ConstantRange > getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS, function_ref< std::optional< ConstantRange >(const APInt &)> Fn)
static const unsigned MaxProcessedPerValue
static ValueLatticeElement getValueFromICmpCtpop(ICmpInst::Predicate Pred, Value *RHS)
Get value range for a "ctpop(Val) Pred RHS" condition.
static bool usesOperand(User *Usr, Value *Op)
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op, const APInt &OpConstVal, const DataLayout &DL)
static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet)
static ValueLatticeElement getFromRangeMetadata(Instruction *BBI)
static Constant * getPredicateResult(CmpInst::Predicate Pred, Constant *C, const ValueLatticeElement &Val, const DataLayout &DL)
static ValueLatticeElement getValueFromOverflowCondition(Value *Val, WithOverflowInst *WO, bool IsTrueDest)
static bool isKnownNonConstant(Value *V)
Returns true if we can statically tell that this value will never be a "useful" constant.
static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val, ICmpInst::Predicate Pred)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
static bool InBlock(const Value *V, const BasicBlock *BB)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
This class represents an incoming formal argument to a Function.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void clear()
Clear the cache of @llvm.assume intrinsics for a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
Value handle with callbacks on RAUW and destruction.
This is the base class for all instructions that perform data casts.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Type * getDestTy() const
Return the destination type, as a convenience.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
ConstantRange castOp(Instruction::CastOps CastOp, uint32_t BitWidth) const
Return a new range representing the possible values resulting from an application of the specified ca...
ConstantRange umin(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned minimum of a value in ...
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
static ConstantRange intrinsic(Intrinsic::ID IntrinsicID, ArrayRef< ConstantRange > Ops)
Compute range of intrinsic result for the given operand ranges.
bool isEmptySet() const
Return true if this set contains no members.
ConstantRange abs(bool IntMinIsPoison=false) const
Calculate absolute value range.
static bool isIntrinsicSupported(Intrinsic::ID IntrinsicID)
Returns true if ConstantRange calculations are supported for intrinsic with IntrinsicID.
ConstantRange overflowingBinaryOp(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind) const
Return a new range representing the possible values resulting from an application of the specified ov...
bool isSingleElement() const
Return true if this set contains exactly one member.
ConstantRange umax(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an unsigned maximum of a value in ...
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange makeMaskNotEqualRange(const APInt &Mask, const APInt &C)
Initialize a range containing all values X that satisfy (X & Mask) != C.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
ConstantRange smin(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a signed minimum of a value in thi...
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
ConstantRange smax(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a signed maximum of a value in thi...
ConstantRange binaryOp(Instruction::BinaryOps BinOp, const ConstantRange &Other) const
Return a new range representing the possible values resulting from an application of the specified bi...
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
ConstantRange toConstantRange() const
Convert constant to an approximate constant range.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
Analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
Value * getAggregateOperand()
idx_iterator idx_begin() const
FunctionPass class - This class is used to implement most global optimizations.
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
This instruction inserts a single (scalar) element into a VectorType value.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
Analysis to compute lazy value information.
Result run(Function &F, FunctionAnalysisManager &FAM)
ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB, Instruction *CxtI=nullptr)
This is the query interface to determine the lattice value for the specified Value* that is true on t...
ValueLatticeElement getValueAt(Value *V, Instruction *CxtI)
This is the query interface to determine the lattice value for the specified Value* at the specified ...
void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc)
This is the update interface to inform the cache that an edge from PredBB to OldSucc has been threade...
void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS)
Printing the LazyValueInfo Analysis.
void forgetValue(Value *V)
This is part of the update interface to remove information related to this value from the cache.
void eraseBlock(BasicBlock *BB)
This is part of the update interface to inform the cache that a block has been deleted.
void clear()
Complete flush all previously computed values.
LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL, Function *GuardDecl)
ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB, Instruction *CxtI=nullptr)
This is the query interface to determine the lattice value for the specified Value* at the context in...
ValueLatticeElement getValueAtUse(const Use &U)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Wrapper around LazyValueInfo.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overriden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
LazyValueInfoWrapperPass()
This pass computes, caches, and vends lazy value constraint information.
void eraseBlock(BasicBlock *BB)
Inform the analysis cache that we have erased a block.
ConstantRange getConstantRangeAtUse(const Use &U, bool UndefAllowed)
Return the ConstantRange constraint that is known to hold for the value at a specific use-site.
ConstantRange getConstantRange(Value *V, Instruction *CxtI, bool UndefAllowed)
Return the ConstantRange constraint that is known to hold for the specified value at the specified in...
void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc)
Inform the analysis cache that we have threaded an edge from PredBB to OldSucc to be from PredBB to N...
Constant * getPredicateOnEdge(CmpInst::Predicate Pred, Value *V, Constant *C, BasicBlock *FromBB, BasicBlock *ToBB, Instruction *CxtI=nullptr)
Determine whether the specified value comparison with a constant is known to be true or false on the ...
Constant * getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB, Instruction *CxtI=nullptr)
Determine whether the specified value is known to be a constant on the specified edge.
ConstantRange getConstantRangeOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB, Instruction *CxtI=nullptr)
Return the ConstantRange constraint that is known to hold for the specified value on the specified edg...
Constant * getConstant(Value *V, Instruction *CxtI)
Determine whether the specified value is known to be a constant at the specified instruction.
void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS)
Print the LazyValueInfo Analysis.
void forgetValue(Value *V)
Remove information related to this value from the cache.
void clear()
Completely flush all previously computed values.
Constant * getPredicateAt(CmpInst::Predicate Pred, Value *V, Constant *C, Instruction *CxtI, bool UseBlockValue)
Determine whether the specified value comparison with a constant is known to be true or false at the ...
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
Handle invalidation events in the new pass manager.
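Taken together, the entries above describe LazyValueInfo's public query surface. A minimal, hypothetical client sketch (the function name and folding policy are assumptions, not code from this file) that asks whether an integer comparison is decided on a particular incoming edge:

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical client: ask LVI whether the comparison `ICmp` is known to be
// true or false when control flows along the edge Pred -> ICmp's block.
static Constant *tryFoldCmpOnEdge(LazyValueInfo &LVI, ICmpInst *ICmp,
                                  BasicBlock *Pred) {
  auto *C = dyn_cast<Constant>(ICmp->getOperand(1));
  if (!C)
    return nullptr;
  // Returns a true/false constant if the predicate is decided on the edge,
  // or nullptr if LVI cannot prove anything.
  return LVI.getPredicateOnEdge(ICmp->getPredicate(), ICmp->getOperand(0), C,
                                Pred, ICmp->getParent(), ICmp);
}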
An instruction for reading from memory.
This is the common base class for memset/memcpy/memmove.
This class wraps the llvm.memcpy/memmove intrinsics.
A Module instance is used to store all the information related to an LLVM module.
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
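These PHINode accessors are the building blocks for per-edge reasoning: a PHI can be approximated by merging whatever is known about each incoming value on its incoming edge. A hedged sketch under that assumption, where QueryEdge stands in for a per-edge query such as getValueOnEdge:

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative only: merge the per-edge lattice values of a PHI's incoming
// values. QueryEdge(V, Pred, BB) is assumed to return what is known about V
// on the edge Pred -> BB.
static ValueLatticeElement approximatePHI(
    PHINode &PN,
    function_ref<ValueLatticeElement(Value *, BasicBlock *, BasicBlock *)>
        QueryEdge) {
  ValueLatticeElement Result; // starts as "undefined"
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    Result.mergeIn(QueryEdge(PN.getIncomingValue(i), PN.getIncomingBlock(i),
                             PN.getParent()));
    if (Result.isOverdefined())
      break; // nothing more can be learned once the value is overdefined
  }
  return Result;
}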
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
This class represents the LLVM 'select' instruction.
Implements a dense probed hash-table based set with some number of buckets stored inline.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents lattice values for constants.
static ValueLatticeElement getRange(ConstantRange CR, bool MayIncludeUndef=false)
bool isOverdefined() const
static ValueLatticeElement getNot(Constant *C)
bool isNotConstant() const
std::optional< APInt > asConstantInteger() const
const ConstantRange & getConstantRange(bool UndefAllowed=true) const
Returns the constant range for this value.
bool isConstantRange(bool UndefAllowed=true) const
Returns true if this value is a constant range.
static ValueLatticeElement get(Constant *C)
Constant * getNotConstant() const
ValueLatticeElement intersect(const ValueLatticeElement &Other) const
Combine two sets of facts about the same value into a single set of facts.
Constant * getConstant() const
bool mergeIn(const ValueLatticeElement &RHS, MergeOptions Opts=MergeOptions())
Updates this object to approximate both this object and RHS.
static ValueLatticeElement getOverdefined()
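A short, self-contained sketch of how the ValueLatticeElement operations above compose; the 32-bit ranges are arbitrary examples chosen for illustration:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

void latticeDemo() {
  ConstantRange R1(APInt(32, 0), APInt(32, 100));  // [0, 100)
  ConstantRange R2(APInt(32, 50), APInt(32, 200)); // [50, 200)

  ValueLatticeElement A = ValueLatticeElement::getRange(R1);
  ValueLatticeElement B = ValueLatticeElement::getRange(R2);

  // intersect(): both facts hold for the same value, so the result is the
  // tighter range [50, 100).
  ValueLatticeElement Both = A.intersect(B);

  // mergeIn(): either fact may hold (e.g. merging across two CFG edges), so
  // A is widened towards the union of the two ranges.
  A.mergeIn(B);
  (void)Both;
}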
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripInBoundsOffsets(function_ref< void(const Value *)> Func=[](const Value *) {}) const
Strip off pointer casts and inbounds GEPs.
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
std::pair< iterator, bool > insert(const ValueT &V)
iterator find_as(const LookupKeyT &Val)
Alternative version of find() which allows a different, and possibly less expensive,...
bool erase(const ValueT &V)
formatted_raw_ostream - A raw_ostream that wraps another one and keeps track of line and column posit...
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclarationIfExists(Module *M, ID id, ArrayRef< Type * > Tys, FunctionType *FT=nullptr)
This version supports overloaded intrinsics.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
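The PatternMatch helpers above are composable matchers applied to a Value with match(). A minimal illustrative example (the wrapper name is made up) that binds the pieces of an and-with-constant, accepting either operand order:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Illustrative: recognise `X & C` (or `C & X`), binding X and the constant.
static bool matchAndWithConstant(Value *V, Value *&X, const APInt *&C) {
  return match(V, m_c_And(m_Value(X), m_APInt(C)));
}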
This is an optimization pass for GlobalISel generic memory operations.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
bool isSafeToSpeculativelyExecuteWithVariableReplaced(const Instruction *I)
Variant of isSafeToSpeculativelyExecute that does not use information from the instruction's non-constant operands, so the answer stays valid if those operands are replaced.
auto pred_end(const MachineBasicBlock *BB)
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
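As a hedged illustration of getConstantRangeFromMetadata (the helper name and the fallback to a full range are assumptions of this sketch, not code from this file), a load's !range metadata can be turned into a conservative ConstantRange:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// Illustrative: assumes LI loads a scalar integer.
static ConstantRange rangeForLoad(const LoadInst &LI) {
  if (MDNode *MD = LI.getMetadata(LLVMContext::MD_range))
    return getConstantRangeFromMetadata(*MD);
  // No metadata: fall back to the full range of the loaded type.
  return ConstantRange::getFull(LI.getType()->getScalarSizeInBits());
}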
Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
FunctionPass * createLazyValueInfoPass()
createLazyValueInfoPass - This creates an instance of the LazyValueInfo pass.
constexpr unsigned MaxAnalysisRecursionDepth
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
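A small sketch of how matchSelectPattern and SelectPatternResult::isMinOrMax fit together (the wrapper function is illustrative):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Illustrative: report whether V is a min/max-style select idiom, and if so
// which flavor and which two operands are being compared.
static bool isMinMaxIdiom(Value *V, SelectPatternFlavor &Flavor,
                          Value *&LHS, Value *&RHS) {
  SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
  if (!SelectPatternResult::isMinOrMax(SPR.Flavor))
    return false;
  Flavor = SPR.Flavor;
  return true;
}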
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
DWARFExpression::Operation Op
static bool hasSingleValue(const ValueLatticeElement &Val)
constexpr unsigned BitWidth
auto pred_begin(const MachineBasicBlock *BB)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
void initializeLazyValueInfoWrapperPassPass(PassRegistry &)
A special type used by analysis passes to provide an address that identifies that particular analysis...
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if the given select pattern flavor is a min or max pattern.