1//===- InlineFunction.cpp - Code to perform function inlining -------------===// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 7//===----------------------------------------------------------------------===// 9// This file implements inlining of a function into a call site, resolving 10// parameters and the return value as appropriate. 12//===----------------------------------------------------------------------===// 86#define DEBUG_TYPE "inline-function" 95cl::desc(
"Convert noalias attributes to metadata during inlining."));
100cl::desc(
"Use the llvm.experimental.noalias.scope.decl " 101"intrinsic during inlining."));
103// Disabled by default, because the added alignment assumptions may increase 104// compile-time and block optimizations. This option is not suitable for use 105// with frontends that emit comprehensive parameter alignment annotations. 109cl::desc(
"Convert align attributes to assumptions during inlining."));
112"max-inst-checked-for-throw-during-inlining",
cl::Hidden,
113cl::desc(
"the maximum number of instructions analyzed for may throw during " 114"attribute inference in inlined body"),
119 /// A class for recording information about inlining a landing pad. 120classLandingPadInliningInfo {
121 /// Destination of the invoke's unwind. 124 /// Destination for the callee's resume. 127 /// LandingPadInst associated with the invoke. 130 /// PHI for EH values from landingpad insts. 131PHINode *InnerEHValuesPHI =
nullptr;
137 : OuterResumeDest(
II->getUnwindDest()) {
138// If there are PHI nodes in the unwind destination block, we need to keep 139// track of which values came into them from the invoke before removing 140// the edge from this block. 143for (; isa<PHINode>(
I); ++
I) {
144// Save the value to use for this edge. 146 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
149 CallerLPad = cast<LandingPadInst>(
I);
152 /// The outer unwind destination is the target of 153 /// unwind edges introduced for calls within the inlined function. 155return OuterResumeDest;
162 /// Forward the 'resume' instruction to the caller's landing pad block. 163 /// When the landing pad block has only one predecessor, this is 164 /// a simple branch. When there is more than one predecessor, we need to 165 /// split the landing pad block after the landingpad instruction and jump 170 /// Add incoming-PHI values to the unwind destination block for the given 171 /// basic block, using the values for the original invoke's source block. 172void addIncomingPHIValuesFor(
BasicBlock *BB)
const{
173 addIncomingPHIValuesForInto(BB, OuterResumeDest);
178for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
180phi->addIncoming(UnwindDestPHIValues[i], src);
184}
// end anonymous namespace 188while (It != BB.
end()) {
189if (
auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(It)) {
190if (IntrinsicCall->isEntry()) {
199/// Get or create a target for the branch from ResumeInsts. 200BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
201if (InnerResumeDest)
return InnerResumeDest;
203// Split the landing pad. 207 OuterResumeDest->
getName() +
".body");
209// The number of incoming edges we expect to the inner landing pad. 210constunsigned PHICapacity = 2;
212// Create corresponding new PHIs for all the PHIs in the outer landing pad. 215for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
216PHINode *OuterPHI = cast<PHINode>(
I);
218 OuterPHI->
getName() +
".lpad-body");
224// Create a PHI for the exception values. 229 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
232return InnerResumeDest;
235/// Forward the 'resume' instruction to the caller's landing pad block. 236/// When the landing pad block has only one predecessor, this is a simple 237/// branch. When there is more than one predecessor, we need to split the 238/// landing pad block after the landingpad instruction and jump to there. 239void LandingPadInliningInfo::forwardResume(
246// Update the PHIs in the destination. They were inserted in an order which 248 addIncomingPHIValuesForInto(Src, Dest);
254/// Helper for getUnwindDestToken/getUnwindDestTokenHelper. 256if (
auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
257return FPI->getParentPad();
258return cast<CatchSwitchInst>(EHPad)->getParentPad();
263/// Helper for getUnwindDestToken that does the descendant-ward part of 269while (!Worklist.
empty()) {
271// We only put pads on the worklist that aren't in the MemoMap. When 272// we find an unwind dest for a pad we may update its ancestors, but 273// the queue only ever contains uncles/great-uncles/etc. of CurrentPad, 274// so they should never get updated while queued on the worklist. 276Value *UnwindDestToken =
nullptr;
277if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
278if (CatchSwitch->hasUnwindDest()) {
279 UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();
281// Catchswitch doesn't have a 'nounwind' variant, and one might be 282// annotated as "unwinds to caller" when really it's nounwind (see 283// e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the 284// parent's unwind dest from this. We can check its catchpads' 285// descendants, since they might include a cleanuppad with an 286// "unwinds to caller" cleanupret, which can be trusted. 287for (
auto HI = CatchSwitch->handler_begin(),
288 HE = CatchSwitch->handler_end();
289 HI != HE && !UnwindDestToken; ++HI) {
294// Intentionally ignore invokes here -- since the catchswitch is 295// marked "unwind to caller", it would be a verifier error if it 296// contained an invoke which unwinds out of it, so any invoke we'd 297// encounter must unwind to some child of the catch. 298if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
302auto Memo = MemoMap.
find(ChildPad);
303if (Memo == MemoMap.
end()) {
304// Haven't figured out this child pad yet; queue it. 308// We've already checked this child, but might have found that 309// it offers no proof either way. 310Value *ChildUnwindDestToken = Memo->second;
311if (!ChildUnwindDestToken)
313// We already know the child's unwind dest, which can either 314// be ConstantTokenNone to indicate unwind to caller, or can 315// be another child of the catchpad. Only the former indicates 316// the unwind dest of the catchswitch. 317if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
318 UnwindDestToken = ChildUnwindDestToken;
326auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
327for (
User *U : CleanupPad->users()) {
328if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
329if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
330 UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();
335Value *ChildUnwindDestToken;
336if (
auto *Invoke = dyn_cast<InvokeInst>(U)) {
337 ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();
338 }
elseif (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
340auto Memo = MemoMap.
find(ChildPad);
341if (Memo == MemoMap.
end()) {
342// Haven't resolved this child yet; queue it and keep searching. 346// We've checked this child, but still need to ignore it if it 347// had no proof either way. 348 ChildUnwindDestToken = Memo->second;
349if (!ChildUnwindDestToken)
352// Not a relevant user of the cleanuppad 355// In a well-formed program, the child/invoke must either unwind to 356// an(other) child of the cleanup, or exit the cleanup. In the 357// first case, continue searching. 358if (isa<Instruction>(ChildUnwindDestToken) &&
361 UnwindDestToken = ChildUnwindDestToken;
365// If we haven't found an unwind dest for CurrentPad, we may have queued its 366// children, so move on to the next in the worklist. 370// Now we know that CurrentPad unwinds to UnwindDestToken. It also exits 371// any ancestors of CurrentPad up to but not including UnwindDestToken's 372// parent pad. Record this in the memo map, and check to see if the 373// original EHPad being queried is one of the ones exited. 375if (
auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
378 UnwindParent =
nullptr;
379bool ExitedOriginalPad =
false;
381 ExitedPad && ExitedPad != UnwindParent;
382 ExitedPad = dyn_cast<Instruction>(
getParentPad(ExitedPad))) {
383// Skip over catchpads since they just follow their catchswitches. 384if (isa<CatchPadInst>(ExitedPad))
386 MemoMap[ExitedPad] = UnwindDestToken;
387 ExitedOriginalPad |= (ExitedPad == EHPad);
390if (ExitedOriginalPad)
391return UnwindDestToken;
393// Continue the search. 396// No definitive information is contained within this funclet. 400/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad, 401/// return that pad instruction. If it unwinds to caller, return 402/// ConstantTokenNone. If it does not have a definitive unwind destination, 405/// This routine gets invoked for calls in funclets in inlinees when inlining 406/// an invoke. Since many funclets don't have calls inside them, it's queried 407/// on-demand rather than building a map of pads to unwind dests up front. 408/// Determining a funclet's unwind dest may require recursively searching its 409/// descendants, and also ancestors and cousins if the descendants don't provide 410/// an answer. Since most funclets will have their unwind dest immediately 411/// available as the unwind dest of a catchswitch or cleanupret, this routine 412/// searches top-down from the given pad and then up. To avoid worst-case 413/// quadratic run-time given that approach, it uses a memo map to avoid 414/// re-processing funclet trees. The callers that rewrite the IR as they go 415/// take advantage of this, for correctness, by checking/forcing rewritten 416/// pads' entries to match the original callee view. 419// Catchpads unwind to the same place as their catchswitch; 420// redirct any queries on catchpads so the code below can 421// deal with just catchswitches and cleanuppads. 422if (
auto *CPI = dyn_cast<CatchPadInst>(EHPad))
423 EHPad = CPI->getCatchSwitch();
425// Check if we've already determined the unwind dest for this pad. 426auto Memo = MemoMap.
find(EHPad);
427if (Memo != MemoMap.
end())
430// Search EHPad and, if necessary, its descendants. 432assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
434return UnwindDestToken;
436// No information is available for this EHPad from itself or any of its 437// descendants. An unwind all the way out to a pad in the caller would 438// need also to agree with the unwind dest of the parent funclet, so 439// search up the chain to try to find a funclet with information. Put 440// null entries in the memo map to avoid re-processing as we go up. 441 MemoMap[EHPad] =
nullptr;
449auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
451// Skip over catchpads since they just follow their catchswitches. 452if (isa<CatchPadInst>(AncestorPad))
454// If the MemoMap had an entry mapping AncestorPad to nullptr, since we 455// haven't yet called getUnwindDestTokenHelper for AncestorPad in this 456// call to getUnwindDestToken, that would mean that AncestorPad had no 457// information in itself, its descendants, or its ancestors. If that 458// were the case, then we should also have recorded the lack of information 459// for the descendant that we're coming from. So assert that we don't 460// find a null entry in the MemoMap for AncestorPad. 461assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
462auto AncestorMemo = MemoMap.
find(AncestorPad);
463if (AncestorMemo == MemoMap.
end()) {
466 UnwindDestToken = AncestorMemo->second;
470 LastUselessPad = AncestorPad;
471 MemoMap[LastUselessPad] =
nullptr;
473 TempMemos.
insert(LastUselessPad);
477// We know that getUnwindDestTokenHelper was called on LastUselessPad and 478// returned nullptr (and likewise for EHPad and any of its ancestors up to 479// LastUselessPad), so LastUselessPad has no information from below. Since 480// getUnwindDestTokenHelper must investigate all downward paths through 481// no-information nodes to prove that a node has no information like this, 482// and since any time it finds information it records it in the MemoMap for 483// not just the immediately-containing funclet but also any ancestors also 484// exited, it must be the case that, walking downward from LastUselessPad, 485// visiting just those nodes which have not been mapped to an unwind dest 486// by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since 487// they are just used to keep getUnwindDestTokenHelper from repeating work), 488// any node visited must have been exhaustively searched with no information 491while (!Worklist.
empty()) {
493auto Memo = MemoMap.
find(UselessPad);
494if (Memo != MemoMap.
end() && Memo->second) {
495// Here the name 'UselessPad' is a bit of a misnomer, because we've found 496// that it is a funclet that does have information about unwinding to 497// a particular destination; its parent was a useless pad. 498// Since its parent has no information, the unwind edge must not escape 499// the parent, and must target a sibling of this pad. This local unwind 500// gives us no information about EHPad. Leave it and the subtree rooted 505// We know we don't have information for UselesPad. If it has an entry in 506// the MemoMap (mapping it to nullptr), it must be one of the TempMemos 507// added on this invocation of getUnwindDestToken; if a previous invocation 508// recorded nullptr, it would have had to prove that the ancestors of 509// UselessPad, which include LastUselessPad, had no information, and that 510// in turn would have required proving that the descendants of 511// LastUselesPad, which include EHPad, have no information about 512// LastUselessPad, which would imply that EHPad was mapped to nullptr in 513// the MemoMap on that invocation, which isn't the case if we got here. 515// Assert as we enumerate users that 'UselessPad' doesn't have any unwind 516// information that we'd be contradicting by making a map entry for it 517// (which is something that getUnwindDestTokenHelper must have proved for 518// us to get here). Just assert on is direct users here; the checks in 519// this downward walk at its descendants will verify that they don't have 520// any unwind edges that exit 'UselessPad' either (i.e. they either have no 521// unwind edges or unwind to a sibling). 522 MemoMap[UselessPad] = UnwindDestToken;
523if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
524assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
525for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
526auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();
527for (
User *U : CatchPad->users()) {
528assert((!isa<InvokeInst>(U) ||
531 ->getFirstNonPHIIt()) == CatchPad)) &&
532"Expected useless pad");
533if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
534 Worklist.
push_back(cast<Instruction>(U));
538assert(isa<CleanupPadInst>(UselessPad));
540assert(!isa<CleanupReturnInst>(U) &&
"Expected useless pad");
542 (!isa<InvokeInst>(U) ||
544 &*cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHIIt()) ==
546"Expected useless pad");
547if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
548 Worklist.
push_back(cast<Instruction>(U));
553return UnwindDestToken;
556/// When we inline a basic block into an invoke, 557/// we have to turn all of the calls that can throw into invokes. 558/// This function analyze BB to see if there are any calls, and if so, 559/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI 560/// nodes in that block with the values specified in InvokeDestPHIValues. 565// We only need to check for function calls: inlined invoke 566// instructions require no special handling. 572// We do not need to (and in fact, cannot) convert possibly throwing calls 573// to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into 574// invokes. The caller's "segment" of the deoptimization continuation 575// attached to the newly inlined @llvm.experimental_deoptimize 576// (resp. @llvm.experimental.guard) call should contain the exception 577// handling logic, if any. 579if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
580F->getIntrinsicID() == Intrinsic::experimental_guard)
584// This call is nested inside a funclet. If that funclet has an unwind 585// destination within the inlinee, then unwinding out of this call would 586// be UB. Rewriting this call to an invoke which targets the inlined 587// invoke's unwind dest would give the call's parent funclet multiple 588// unwind destinations, which is something that subsequent EH table 589// generation can't handle and that the veirifer rejects. So when we 590// see such a call, leave it as a call. 591auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
592Value *UnwindDestToken =
594if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
598if (
auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
599 MemoKey = CatchPad->getCatchSwitch();
601 MemoKey = FuncletPad;
602assert(FuncletUnwindMap->count(MemoKey) &&
603 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
604"must get memoized to avoid confusing later searches");
614/// If we inlined an invoke site, we need to convert calls 615/// in the body of the inlined function into invokes. 617/// II is the invoke instruction being inlined. FirstNewBlock is the first 618/// block of the inlined code (the last block is the end of the function), 619/// and InlineCodeInfo is information about the code that got inlined. 626// The inlined code is currently at the end of the function, scan from the 627// start of the inlined code to its end, checking for stuff we need to 629 LandingPadInliningInfo Invoke(
II);
631// Get all of the inlined landing pad instructions. 635if (
InvokeInst *
II = dyn_cast<InvokeInst>(
I->getTerminator()))
636 InlinedLPads.
insert(
II->getLandingPadInst());
638// Append the clauses from the outer landing pad instruction into the inlined 639// landing pad instructions. 643 InlinedLPad->reserveClauses(OuterNum);
644for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
645 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
647 InlinedLPad->setCleanup(
true);
654 &*BB, Invoke.getOuterResumeDest()))
655// Update any PHI nodes in the exceptional block to indicate that there 656// is now a new entry in them. 657 Invoke.addIncomingPHIValuesFor(NewBB);
659// Forward any resumes that are remaining here. 660if (
ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
661 Invoke.forwardResume(RI, InlinedLPads);
664// Now that everything is happy, we have one final detail. The PHI nodes in 665// the exception destination block still have entries due to the original 666// invoke instruction. Eliminate these entries (which might even delete the 671/// If we inlined an invoke site, we need to convert calls 672/// in the body of the inlined function into invokes. 674/// II is the invoke instruction being inlined. FirstNewBlock is the first 675/// block of the inlined code (the last block is the end of the function), 676/// and InlineCodeInfo is information about the code that got inlined. 684// If there are PHI nodes in the unwind destination block, we need to keep 685// track of which values came into them from the invoke before removing the 686// edge from this block. 690// Save the value to use for this edge. 691 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
694// Add incoming-PHI values to the unwind destination block for the given basic 695// block, using the values for the original invoke's source block. 698for (
Value *V : UnwindDestPHIValues) {
700PHI->addIncoming(V, Src);
705// This connects all the instructions which 'unwind to caller' to the invoke 710if (
auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
711if (CRI->unwindsToCaller()) {
712auto *CleanupPad = CRI->getCleanupPad();
714 CRI->eraseFromParent();
716// Finding a cleanupret with an unwind destination would confuse 717// subsequent calls to getUnwindDestToken, so map the cleanuppad 718// to short-circuit any such calls and recognize this as an "unwind 719// to caller" cleanup. 721 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
722 FuncletUnwindMap[CleanupPad] =
732if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
733if (CatchSwitch->unwindsToCaller()) {
734Value *UnwindDestToken;
736 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
737// This catchswitch is nested inside another funclet. If that 738// funclet has an unwind destination within the inlinee, then 739// unwinding out of this catchswitch would be UB. Rewriting this 740// catchswitch to unwind to the inlined invoke's unwind dest would 741// give the parent funclet multiple unwind destinations, which is 742// something that subsequent EH table generation can't handle and 743// that the veirifer rejects. So when we see such a call, leave it 744// as "unwind to caller". 746if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
749// This catchswitch has no parent to inherit constraints from, and 750// none of its descendants can have an unwind edge that exits it and 751// targets another funclet in the inlinee. It may or may not have a 752// descendant that definitively has an unwind to caller. In either 753// case, we'll have to assume that any unwinds out of it may need to 754// be routed to the caller, so treat it as though it has a definitive 759 CatchSwitch->getParentPad(), UnwindDest,
760 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
761 CatchSwitch->getIterator());
762for (
BasicBlock *PadBB : CatchSwitch->handlers())
763 NewCatchSwitch->addHandler(PadBB);
764// Propagate info for the old catchswitch over to the new one in 765// the unwind map. This also serves to short-circuit any subsequent 766// checks for the unwind dest of this catchswitch, which would get 767// confused if they found the outer handler in the callee. 768 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
769 Replacement = NewCatchSwitch;
771 }
elseif (!isa<FuncletPadInst>(
I)) {
777I->replaceAllUsesWith(Replacement);
788 &*BB, UnwindDest, &FuncletUnwindMap))
789// Update any PHI nodes in the exceptional block to indicate that there 790// is now a new entry in them. 793// Now that everything is happy, we have one final detail. The PHI nodes in 794// the exception destination block still have entries due to the original 795// invoke instruction. Eliminate these entries (which might even delete the 801MDNode *CallsiteStackContext) {
804// Because of the context trimming performed during matching, the callsite 805// context could have more stack ids than the MIB. We match up to the end of 806// the shortest stack context. 807for (
auto MIBStackIter = MIBStackContext->
op_begin(),
808 CallsiteStackIter = CallsiteStackContext->
op_begin();
809 MIBStackIter != MIBStackContext->
op_end() &&
810 CallsiteStackIter != CallsiteStackContext->
op_end();
811 MIBStackIter++, CallsiteStackIter++) {
812auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
813auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
815if (Val1->getZExtValue() != Val2->getZExtValue())
822 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
826 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
830const std::vector<Metadata *> &MIBList) {
832// Remove existing memprof, which will either be replaced or may not be needed 833// if we are able to use a single allocation type function attribute. 837CallStack.addCallStack(cast<MDNode>(MIB));
838bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
840if (!MemprofMDAttached)
841// If we used a function attribute remove the callsite metadata as well. 845// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the 846// inlined callee body, based on the callsite metadata InlinedCallsiteMD from 847// the call that was inlined. 850MDNode *InlinedCallsiteMD) {
852MDNode *ClonedCallsiteMD =
nullptr;
853// Check if the call originally had callsite metadata, and update it for the 854// new call in the inlined body. 856// The cloned call's context is now the concatenation of the original call's 857// callsite metadata and the callsite metadata on the call where it was 860 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
863// Update any memprof metadata on the cloned call. 867// We currently expect that allocations with memprof metadata also have 868// callsite metadata for the allocation's part of the context. 871// New call's MIB list. 872 std::vector<Metadata *> NewMIBList;
874// For each MIB metadata, check if its call stack context starts with the 875// new clone's callsite metadata. If so, that MIB goes onto the cloned call in 876// the inlined body. If not, it stays on the out-of-line original call. 877for (
auto &MIBOp : OrigMemProfMD->
operands()) {
878MDNode *MIB = dyn_cast<MDNode>(MIBOp);
879// Stack is first operand of MIB. 882// See if the new cloned callsite context matches this profiled context. 884// Add it to the cloned call's MIB list. 885 NewMIBList.push_back(MIB);
887if (NewMIBList.empty()) {
896// Update memprof related metadata (!memprof and !callsite) based on the 897// inlining of Callee into the callsite at CB. The updates include merging the 898// inlined callee's callsite metadata with that of the inlined call, 899// and moving the subset of any memprof contexts to the inlined callee 900// allocations if they match the new inlined call stack. 903bool ContainsMemProfMetadata,
906// Only need to update if the inlined callsite had callsite metadata, or if 907// there was any memprof metadata inlined. 908if (!CallsiteMD && !ContainsMemProfMetadata)
911// Propagate metadata onto the cloned calls in the inlined callee. 912for (
constauto &Entry : VMap) {
913// See if this is a call that has been inlined and remapped, and not 914// simplified away in the process. 915auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
916auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
917if (!OrigCall || !ClonedCall)
919// If the inlined callsite did not have any callsite metadata, then it isn't 920// involved in any profiled call contexts, and we can remove any memprof 921// metadata on the cloned call. 931/// When inlining a call site that has !llvm.mem.parallel_loop_access, 932/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should 933/// be propagated to all memory-accessing cloned instructions. 936MDNode *MemParallelLoopAccess =
937 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
941if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
946// This metadata is only relevant for instructions that access memory. 947if (!
I.mayReadOrWriteMemory())
950if (MemParallelLoopAccess) {
951// TODO: This probably should not overwrite MemParalleLoopAccess. 953I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
954 MemParallelLoopAccess);
955I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
956 MemParallelLoopAccess);
961I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
965I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
969I.getMetadata(LLVMContext::MD_noalias), NoAlias));
974/// Bundle operands of the inlined function must be added to inlined call sites. 981// Skip call sites which already have a "funclet" bundle. 984// Skip call sites which are nounwind intrinsics (as long as they don't 985// lower into regular function calls in the course of IR transformations). 987 dyn_cast<Function>(
I->getCalledOperand()->stripPointerCasts());
988if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
993I->getOperandBundlesAsDefs(OpBundles);
998I->replaceAllUsesWith(NewInst);
1004/// Utility for cloning !noalias and !alias.scope metadata. When a code region 1005/// using scoped alias metadata is inlined, the aliasing relationships may not 1006/// hold between the two version. It is necessary to create a deep clone of the 1007/// metadata, putting the two versions in separate scope domains. 1008classScopedAliasMetadataDeepCloner {
1012void addRecursiveMetadataUses();
1015 ScopedAliasMetadataDeepCloner(
constFunction *
F);
1017 /// Create a new clone of the scoped alias metadata, which will be used by 1018 /// subsequent remap() calls. 1021 /// Remap instructions in the given range from the original to the cloned 1027ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1031if (
constMDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1033if (
constMDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1036// We also need to clone the metadata in noalias intrinsics. 1037if (
constauto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1038 MD.insert(Decl->getScopeList());
1041 addRecursiveMetadataUses();
1044void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1046while (!
Queue.empty()) {
1049if (
constMDNode *OpMD = dyn_cast<MDNode>(
Op))
1051Queue.push_back(OpMD);
1055void ScopedAliasMetadataDeepCloner::clone() {
1056assert(MDMap.empty() &&
"clone() already called ?");
1061 MDMap[
I].reset(DummyNodes.
back().get());
1064// Create new metadata nodes to replace the dummy nodes, replacing old 1065// metadata references with either a dummy node or an already-created new 1070if (
constMDNode *M = dyn_cast<MDNode>(
Op))
1077MDTuple *TempM = cast<MDTuple>(MDMap[
I]);
1088return;
// Nothing to do. 1092// TODO: The null checks for the MDMap.lookup() results should no longer 1094if (
MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1095if (
MDNode *MNew = MDMap.lookup(M))
1096I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1098if (
MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1099if (
MDNode *MNew = MDMap.lookup(M))
1100I.setMetadata(LLVMContext::MD_noalias, MNew);
1102if (
auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1103if (
MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1104 Decl->setScopeList(MNew);
1109/// If the inlined function has noalias arguments, 1110/// then add new alias scopes for each noalias argument, tag the mapped noalias 1111/// parameters with noalias metadata specifying the new scope, and tag all 1112/// non-derived loads, stores and memory intrinsics with the new alias scopes. 1123if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1126if (NoAliasArgs.
empty())
1129// To do a good job, if a noalias variable is captured, we need to know if 1130// the capture point dominates the particular use we're considering. 1134// noalias indicates that pointer values based on the argument do not alias 1135// pointer values which are not based on it. So we add a new "scope" for each 1136// noalias function argument. Accesses using pointers based on that argument 1137// become part of that alias scope, accesses using pointers not based on that 1138// argument are tagged as noalias with that scope. 1143// Create a new scope domain for this function. 1146for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1149 std::string
Name = std::string(CalledFunc->
getName());
1154Name +=
": argument ";
1158// Note: We always create a new anonymous root here. This is true regardless 1159// of the linkage of the callee because the aliasing "scope" is not just a 1160// property of the callee, but also all control dependencies in the caller. 1162 NewScopes.
insert(std::make_pair(
A, NewScope));
1165// Introduce a llvm.experimental.noalias.scope.decl for the noalias 1170// Ignore the result for now. The result will be used when the 1171// llvm.noalias intrinsic is introduced. 1176// Iterate over all new instructions in the map; for all memory-access 1177// instructions, add the alias scope metadata. 1179 VMI != VMIE; ++VMI) {
1180if (
constInstruction *
I = dyn_cast<Instruction>(VMI->first)) {
1184Instruction *NI = dyn_cast<Instruction>(VMI->second);
1188bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1191if (
constLoadInst *LI = dyn_cast<LoadInst>(
I))
1192 PtrArgs.
push_back(LI->getPointerOperand());
1193elseif (
constStoreInst *SI = dyn_cast<StoreInst>(
I))
1194 PtrArgs.
push_back(SI->getPointerOperand());
1195elseif (
constVAArgInst *VAAI = dyn_cast<VAArgInst>(
I))
1196 PtrArgs.
push_back(VAAI->getPointerOperand());
1198 PtrArgs.
push_back(CXI->getPointerOperand());
1200 PtrArgs.
push_back(RMWI->getPointerOperand());
1201elseif (
constauto *Call = dyn_cast<CallBase>(
I)) {
1202// If we know that the call does not access memory, then we'll still 1203// know that about the inlined clone of this call site, and we don't 1204// need to add metadata. 1205if (Call->doesNotAccessMemory())
1212// We'll retain this knowledge without additional metadata. 1217 IsArgMemOnlyCall =
true;
1220for (
Value *Arg : Call->args()) {
1221// Only care about pointer arguments. If a noalias argument is 1222// accessed through a non-pointer argument, it must be captured 1223// first (e.g. via ptrtoint), and we protect against captures below. 1224if (!Arg->getType()->isPointerTy())
1231// If we found no pointers, then this instruction is not suitable for 1232// pairing with an instruction to receive aliasing metadata. 1233// However, if this is a call, this we might just alias with none of the 1234// noalias arguments. 1235if (PtrArgs.
empty() && !IsFuncCall)
1238// It is possible that there is only one underlying object, but you 1239// need to go through several PHIs to see it, and thus could be 1240// repeated in the Objects list. 1244for (
constValue *V : PtrArgs) {
1248for (
constValue *O : Objects)
1252// Figure out if we're derived from anything that is not a noalias 1254bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1255 UsesUnknownObject =
false;
1256for (
constValue *V : ObjSet) {
1257// Is this value a constant that cannot be derived from any pointer 1258// value (we need to exclude constant expressions, for example, that 1259// are formed from arithmetic on global symbols). 1260bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1261 isa<ConstantPointerNull>(V) ||
1262 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1266// If this is anything other than a noalias argument, then we cannot 1267// completely describe the aliasing properties using alias.scope 1268// metadata (and, thus, won't add any). 1269if (
constArgument *
A = dyn_cast<Argument>(V)) {
1271 UsesAliasingPtr =
true;
1273 UsesAliasingPtr =
true;
1277// An escape source can only alias with a noalias argument if it has 1278// been captured beforehand. 1279 RequiresNoCaptureBefore =
true;
1281// If this is neither an escape source, nor some identified object 1282// (which cannot directly alias a noalias argument), nor some other 1283// argument (which, by definition, also cannot alias a noalias 1284// argument), conservatively do not make any assumptions. 1285 UsesUnknownObject =
true;
1289// Nothing we can do if the used underlying object cannot be reliably 1291if (UsesUnknownObject)
1294// A function call can always get captured noalias pointers (via other 1295// parameters, globals, etc.). 1296if (IsFuncCall && !IsArgMemOnlyCall)
1297 RequiresNoCaptureBefore =
true;
1299// First, we want to figure out all of the sets with which we definitely 1300// don't alias. Iterate over all noalias set, and add those for which: 1301// 1. The noalias argument is not in the set of objects from which we 1302// definitely derive. 1303// 2. The noalias argument has not yet been captured. 1304// An arbitrary function that might load pointers could see captured 1305// noalias arguments via other noalias arguments or globals, and so we 1306// must always check for prior capture. 1309continue;
// May be based on a noalias argument. 1311// It might be tempting to skip the PointerMayBeCapturedBefore check if 1312// A->hasNoCaptureAttr() is true, but this is incorrect because 1313// nocapture only guarantees that no copies outlive the function, not 1314// that the value cannot be locally captured. 1315if (!RequiresNoCaptureBefore ||
1317/* StoreCaptures */false,
I, &DT))
1327// Next, we want to figure out all of the sets to which we might belong. 1328// We might belong to a set if the noalias argument is in the set of 1329// underlying objects. If there is some non-noalias argument in our list 1330// of underlying objects, then we cannot add a scope because the fact 1331// that some access does not alias with any set of our noalias arguments 1332// cannot itself guarantee that it does not alias with this access 1333// (because there is some pointer of unknown origin involved and the 1334// other access might also depend on this pointer). We also cannot add 1335// scopes to arbitrary functions unless we know they don't access any 1336// non-parameter pointer-values. 1337bool CanAddScopes = !UsesAliasingPtr;
1338if (CanAddScopes && IsFuncCall)
1339 CanAddScopes = IsArgMemOnlyCall;
1344 Scopes.push_back(NewScopes[
A]);
1349 LLVMContext::MD_alias_scope,
1360"Expected to be in same basic block!");
1362assert(BeginIt !=
End->getIterator() &&
"Non-empty BB has empty iterator");
1367// Add attributes from CB params and Fn attributes that can always be propagated 1368// to the corresponding argument / inner callbases. 1375// Collect valid attributes for all params. 1377bool HasAttrToPropagate =
false;
1379// Attributes we can only propagate if the exact parameter is forwarded. 1380// We can propagate both poison generating and UB generating attributes 1381// without any extra checks. The only attribute that is tricky to propagate 1382// is `noundef` (skipped for now) as that can create new UB where previous 1383// behavior was just using a poison value. 1385 Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
1386 Attribute::NonNull, Attribute::Alignment, Attribute::Range};
1388for (
unsignedI = 0, E = CB.
arg_size();
I < E; ++
I) {
1391// Access attributes can be propagated to any param with the same underlying 1392// object as the argument. 1394 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadNone);
1396 ValidObjParamAttrs.
back().addAttribute(Attribute::ReadOnly);
1401 ValidExactParamAttrs.
back().addAttribute(Attr);
1404 HasAttrToPropagate |= ValidObjParamAttrs.
back().hasAttributes();
1405 HasAttrToPropagate |= ValidExactParamAttrs.
back().hasAttributes();
1408// Won't be able to propagate anything. 1409if (!HasAttrToPropagate)
1414constauto *InnerCB = dyn_cast<CallBase>(&Ins);
1417auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.
lookup(InnerCB));
1420// The InnerCB might have be simplified during the inlining 1421// process which can make propagation incorrect. 1422if (InlinedFunctionInfo.
isSimplified(InnerCB, NewInnerCB))
1426for (
unsignedI = 0, E = InnerCB->arg_size();
I < E; ++
I) {
1427// It's unsound or requires special handling to propagate 1428// attributes to byval arguments. Even if CalledFunction 1429// doesn't e.g. write to the argument (readonly), the call to 1430// NewInnerCB may write to its by-value copy. 1431if (NewInnerCB->paramHasAttr(
I, Attribute::ByVal))
1434// Don't bother propagating attrs to constants. 1435if (
match(NewInnerCB->getArgOperand(
I),
1439// Check if the underlying value for the parameter is an argument. 1440constArgument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(
I));
1444// For dereferenceable, dereferenceable_or_null, align, etc... 1445// we don't want to propagate if the existing param has the same 1446// attribute with "better" constraints. So remove from the 1447// new AL if the region of the existing param is larger than 1448// what we can propagate. 1451if (AL.getParamDereferenceableBytes(
I) >
1452 NewAB.getDereferenceableBytes())
1454if (AL.getParamDereferenceableOrNullBytes(
I) >
1455 NewAB.getDereferenceableOrNullBytes())
1457if (AL.getParamAlignment(
I).valueOrOne() >
1458 NewAB.getAlignment().valueOrOne())
1460if (
auto ExistingRange = AL.getParamRange(
I)) {
1461if (
auto NewRange = NewAB.getRange()) {
1464 NewAB.removeAttribute(Attribute::Range);
1465 NewAB.addRangeAttr(CombinedRange);
1468 AL = AL.addParamAttributes(Context,
I, NewAB);
1469 }
elseif (NewInnerCB->getArgOperand(
I)->getType()->isPointerTy()) {
1470// Check if the underlying value for the parameter is an argument. 1471constValue *UnderlyingV =
1473 Arg = dyn_cast<Argument>(UnderlyingV);
1481// If so, propagate its access attributes. 1482 AL = AL.addParamAttributes(Context,
I, ValidObjParamAttrs[ArgNo]);
1484// We can have conflicting attributes from the inner callsite and 1485// to-be-inlined callsite. In that case, choose the most 1488// readonly + writeonly means we can never deref so make readnone. 1489if (AL.hasParamAttr(
I, Attribute::ReadOnly) &&
1490 AL.hasParamAttr(
I, Attribute::WriteOnly))
1491 AL = AL.addParamAttribute(Context,
I, Attribute::ReadNone);
1493// If have readnone, need to clear readonly/writeonly 1494if (AL.hasParamAttr(
I, Attribute::ReadNone)) {
1495 AL = AL.removeParamAttribute(Context,
I, Attribute::ReadOnly);
1496 AL = AL.removeParamAttribute(Context,
I, Attribute::WriteOnly);
1499// Writable cannot exist in conjunction w/ readonly/readnone 1500if (AL.hasParamAttr(
I, Attribute::ReadOnly) ||
1501 AL.hasParamAttr(
I, Attribute::ReadNone))
1502 AL = AL.removeParamAttribute(Context,
I, Attribute::Writable);
1504 NewInnerCB->setAttributes(AL);
1509// Only allow these white listed attributes to be propagated back to the 1510// callee. This is because other attributes may only be valid on the call 1511// itself, i.e. attributes such as signext and zeroext. 1513// Attributes that are always okay to propagate as if they are violated its 1528// Attributes that need additional checks as propagating them may change 1529// behavior or cause new UB. 1550for (
auto &BB : *CalledFunction) {
1551auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1554auto *RetVal = cast<CallBase>(RI->
getOperand(0));
1555// Check that the cloned RetVal exists and is a call, otherwise we cannot 1556// add the attributes on the cloned RetVal. Simplification during inlining 1557// could have transformed the cloned instruction. 1558auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.
lookup(RetVal));
1562// The RetVal might have be simplified during the inlining 1563// process which can make propagation incorrect. 1564if (InlinedFunctionInfo.
isSimplified(RetVal, NewRetVal))
1566// Backward propagation of attributes to the returned value may be incorrect 1567// if it is control flow dependent. 1571// %rv2 = call @bar() 1579// %val = call nonnull @callee() 1581// Here we cannot add the nonnull attribute on either foo or bar. So, we 1582// limit the check to both RetVal and RI are in the same basic block and 1583// there are no throwing/exiting instructions between these instructions. 1584if (RI->
getParent() != RetVal->getParent() ||
1587// Add to the existing attributes of NewRetVal, i.e. the cloned call 1589// NB! When we have the same attribute already existing on NewRetVal, but 1590// with a differing value, the AttributeList's merge API honours the already 1591// existing attribute value (i.e. attributes such as dereferenceable, 1592// dereferenceable_or_null etc). See AttrBuilder::merge for more details. 1597 AL.getRetDereferenceableOrNullBytes())
1600// Attributes that may generate poison returns are a bit tricky. If we 1601// propagate them, other uses of the callsite might have their behavior 1602// change or cause UB (if they have noundef) b.c of the new potential 1604// Take the following three cases: 1607// define nonnull ptr @foo() { 1608// %p = call ptr @bar() 1609// call void @use(ptr %p) willreturn nounwind 1614// define noundef nonnull ptr @foo() { 1615// %p = call ptr @bar() 1616// call void @use(ptr %p) willreturn nounwind 1621// define nonnull ptr @foo() { 1622// %p = call noundef ptr @bar() 1626// In case 1, we can't propagate nonnull because poison value in @use may 1627// change behavior or trigger UB. 1628// In case 2, we don't need to be concerned about propagating nonnull, as 1629// any new poison at @use will trigger UB anyways. 1630// In case 3, we can never propagate nonnull because it may create UB due to 1631// the noundef on @bar. 1637Attribute NewRange = AL.getRetAttr(Attribute::Range);
1644// If the callsite has `noundef`, then a poison due to violating the 1645// return attribute will create UB anyways so we can always propagate. 1646// Otherwise, if the return value (callee to be inlined) has `noundef`, we 1647// can't propagate as a new poison return will cause UB. 1648// Finally, check if the return value has no uses whose behavior may 1649// change/may cause UB if we potentially return poison. At the moment this 1650// is implemented overly conservatively with a single-use check. 1651// TODO: Update the single-use check to iterate through uses and only bail 1652// if we have a potentially dangerous use. 1655 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1658 NewRetVal->setAttributes(NewAL);
1662/// If the inlined function has non-byval align arguments, then 1663/// add @llvm.assume-based alignment assumptions to preserve this information. 1671// To avoid inserting redundant assumptions, we should check for assumptions 1672// already in the caller. To do this, we might need a DT of the caller. 1674bool DTCalculated =
false;
1678if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1689// If we can already prove the asserted alignment in the context of the 1690// caller, then don't bother inserting the assumption. 1696DL, ArgVal, Alignment->value());
1708 Builder.
getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1710// Always generate a memcpy of alignment 1 here because we don't know 1711// the alignment of the src pointer. Other optimizations can infer 1716// The verifier requires that all calls of debug-info-bearing functions 1717// from debug-info-bearing functions have a debug location (for inlining 1718// purposes). Assign a dummy location to satisfy the constraint. 1721 CI->
setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1724/// When inlining a call site that has a byval argument, 1725/// we have to make the implicit memcpy explicit by adding it. 1734// If the called function is readonly, then it could not mutate the caller's 1735// copy of the byval'd memory. In this case, it is safe to elide the copy and 1738// If the byval argument has a specified alignment that is greater than the 1739// passed in pointer, then we either have to round up the input pointer or 1740// give up on this transformation. 1747// If the pointer is already known to be sufficiently aligned, or if we can 1748// round it up to a larger alignment, then we don't need a temporary. 1753// Otherwise, we have to make a memcpy to get a safe alignment. This is bad 1754// for code quality, but rarely happens and is required for correctness. 1757// Create the alloca. If we have DataLayout, use nice alignment. 1758Align Alignment =
DL.getPrefTypeAlign(ByValType);
1760// If the byval had an alignment specified, we *must* use at least that 1761// alignment, as it is required by the byval argument (and uses of the 1762// pointer inside the callee). 1764 Alignment = std::max(Alignment, *ByValAlignment);
1768nullptr, Alignment, Arg->
getName());
1772// Uses of the argument in the function should use our new alloca 1777// Check whether this Value is used by a lifetime intrinsic. 1779for (
User *U : V->users())
1781if (
II->isLifetimeStartOrEnd())
1786// Check whether the given alloca already has 1787// lifetime.start or lifetime.end intrinsics. 1795// Do a scan to find all the casts to i8*. 1797if (U->getType() != Int8PtrTy)
continue;
1798if (U->stripPointerCasts() != AI)
continue;
1805/// Return the result of AI->isStaticAlloca() if AI were moved to the entry 1806/// block. Allocas used in inalloca calls and allocas of dynamic array size 1807/// cannot be static. 1812/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL 1813/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache. 1818return DILocation::get(Ctx, OrigDL.
getLine(), OrigDL.
getCol(),
1822/// Update inlined instructions' line numbers to 1823/// to encode location where these instructions are inlined. 1833// Create a unique call site, not to be confused with any other call from the 1835 InlinedAtNode = DILocation::getDistinct(
1836 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1837 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1839// Cache the inlined-at nodes as they're built so they are reused, without 1840// this every instruction's inlined-at chain would become distinct from each 1844// Check if we are not generating inline line tables and want to use 1845// the call site location instead. 1846bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1848// Helper-util for updating the metadata attached to an instruction. 1850// Loop metadata needs to be updated so that the start and end locs 1851// reference inlined-at locations. 1852auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1854if (
auto *Loc = dyn_cast_or_null<DILocation>(MD))
1860if (!NoInlineLineTables)
1868if (CalleeHasDebugInfo && !NoInlineLineTables)
1871// If the inlined instruction has no line number, or if inline info 1872// is not being generated, make it look as if it originates from the call 1873// location. This is important for ((__always_inline, __nodebug__)) 1874// functions which must use caller location for all instructions in their 1877// Don't update static allocas, as they may get moved later. 1878if (
auto *AI = dyn_cast<AllocaInst>(&
I))
1882// Do not force a debug loc for pseudo probes, since they do not need to 1883// be debuggable, and also they are expected to have a zero/null dwarf 1884// discriminator at this point which could be violated otherwise. 1885if (isa<PseudoProbeInst>(
I))
1888I.setDebugLoc(TheCallDL);
1891// Helper-util for updating debug-info records attached to instructions. 1893assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1894if (NoInlineLineTables) {
1895 DVR->setDebugLoc(TheCallDL);
1901 DVR->getMarker()->getParent()->
getContext(), IANodes);
1902 DVR->setDebugLoc(IDL);
1905// Iterate over all instructions, updating metadata and debug-info records. 1906for (; FI != Fn->
end(); ++FI) {
1914// Remove debug info intrinsics if we're not keeping inline info. 1915if (NoInlineLineTables) {
1917while (BI != FI->end()) {
1918if (isa<DbgInfoIntrinsic>(BI)) {
1919 BI = BI->eraseFromParent();
1922 BI->dropDbgRecords();
1931#define DEBUG_TYPE "assignment-tracking" 1932/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB. 1939errs() <<
"# Finding caller local variables escaped by callee\n");
1942if (!Arg->getType()->isPointerTy()) {
1953// Walk back to the base storage. 1954assert(Arg->getType()->isPtrOrPtrVectorTy());
1955APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
1957 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
1959LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
1965// We only need to process each base address once - skip any duplicates. 1969// Find all local variables associated with the backing storage. 1970auto CollectAssignsForStorage = [&](
auto *DbgAssign) {
1971// Skip variables from inlined functions - they are not local variables. 1972if (DbgAssign->getDebugLoc().getInlinedAt())
1980return EscapedLocals;
1986 << Start->getParent()->getName() <<
" from " 1992/// Update inlined instructions' DIAssignID metadata. We need to do this 1993/// otherwise a function inlined more than once into the same function 1994/// will cause DIAssignID to be shared by many instructions. 1997// Loop over all the inlined instructions. If we find a DIAssignID 1998// attachment or use, replace it with a new version. 1999for (
auto BBI = Start; BBI !=
End; ++BBI) {
2005#define DEBUG_TYPE "inline-function" 2007/// Update the block frequencies of the caller after a callee has been inlined. 2009/// Each block cloned into the caller has its block frequency scaled by the 2010/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of 2011/// callee's entry block gets the same frequency as the callsite block and the 2012/// relative frequencies of all cloned blocks remain the same after cloning. 2019for (
auto Entry : VMap) {
2020if (!isa<BasicBlock>(Entry.first) || !Entry.second)
2022auto *OrigBB = cast<BasicBlock>(Entry.first);
2023auto *ClonedBB = cast<BasicBlock>(Entry.second);
2025if (!ClonedBBs.
insert(ClonedBB).second) {
2026// Multiple blocks in the callee might get mapped to one cloned block in 2027// the caller since we prune the callee as we clone it. When that happens, 2028// we want to use the maximum among the original blocks' frequencies. 2037 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
2040/// Update the branch metadata for cloned call instructions. 2050 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
2055Function *Callee, int64_t EntryDelta,
2057auto CalleeCount = Callee->getEntryCount();
2061constuint64_t PriorEntryCount = CalleeCount->getCount();
2063// Since CallSiteCount is an estimate, it could exceed the original callee 2064// count and has to be set to 0 so guard against underflow. 2066 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
2068 : PriorEntryCount + EntryDelta;
2079uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
2080for (
auto Entry : *VMap) {
2081if (isa<CallInst>(Entry.first))
2082if (
auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
2083 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
2084 updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
2087if (isa<InvokeInst>(Entry.first))
2088if (
auto *
II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
2089II->updateProfWeight(CloneEntryCount, PriorEntryCount);
2090 updateVTableProfWeight(
II, CloneEntryCount, PriorEntryCount);
2096 Callee->setEntryCount(NewEntryCount);
2099// No need to update the callsite if it is pruned during inlining. 2100if (!VMap || VMap->
count(&BB))
2102if (
CallInst *CI = dyn_cast<CallInst>(&
I)) {
2103 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2104 updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
2107II->updateProfWeight(NewEntryCount, PriorEntryCount);
2108 updateVTableProfWeight(
II, NewEntryCount, PriorEntryCount);
2114/// An operand bundle "clang.arc.attachedcall" on a call indicates the call 2115/// result is implicitly consumed by a call to retainRV or claimRV immediately 2116/// after the call. This function inlines the retainRV/claimRV calls. 2118/// There are three cases to consider: 2120/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned 2121/// object in the callee return block, the autoreleaseRV call and the 2122/// retainRV/claimRV call in the caller cancel out. If the call in the caller 2123/// is a claimRV call, a call to objc_release is emitted. 2125/// 2. If there is a call in the callee return block that doesn't have operand 2126/// bundle "clang.arc.attachedcall", the operand bundle on the original call 2127/// is transferred to the call in the callee. 2129/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is 2135bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2136 IsUnsafeClaimRV = !IsRetainRV;
2138for (
auto *RI : Returns) {
2140bool InsertRetainCall = IsRetainRV;
2143// Walk backwards through the basic block looking for either a matching 2144// autoreleaseRV call or an unannotated call. 2149if (isa<CastInst>(
I))
2152if (
auto *
II = dyn_cast<IntrinsicInst>(&
I)) {
2153if (
II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2158// If we've found a matching authoreleaseRV call: 2159// - If claimRV is attached to the call, insert a call to objc_release 2160// and erase the autoreleaseRV call. 2161// - If retainRV is attached to the call, just erase the autoreleaseRV 2163if (IsUnsafeClaimRV) {
2167II->eraseFromParent();
2168 InsertRetainCall =
false;
2172auto *CI = dyn_cast<CallInst>(&
I);
2181// If we've found an unannotated call that defines RetOpnd, add a 2182// "clang.arc.attachedcall" operand bundle. 2187 NewCall->copyMetadata(*CI);
2188 CI->replaceAllUsesWith(NewCall);
2189 CI->eraseFromParent();
2190 InsertRetainCall =
false;
2194if (InsertRetainCall) {
2195// The retainRV is attached to the call and we've failed to find a 2196// matching autoreleaseRV or an annotated call in the callee. Emit a call 2204// In contextual profiling, when an inline succeeds, we want to remap the 2205// indices of the callee into the index space of the caller. We can't just leave 2206// them as-is because the same callee may appear in other places in this caller 2207// (other callsites), and its (callee's) counters and sub-contextual profile 2208// tree would be potentially different. 2209// Not all BBs of the callee may survive the opportunistic DCE InlineFunction 2210// does (same goes for callsites in the callee). 2211// We will return a pair of vectors, one for basic block IDs and one for 2212// callsites. For such a vector V, V[Idx] will be -1 if the callee 2213// instrumentation with index Idx did not survive inlining, and a new value 2215// This function will update the caller's instrumentation intrinsics 2216// accordingly, mapping indices as described above. We also replace the "name" 2217// operand because we use it to distinguish between "own" instrumentation and 2218// "from callee" instrumentation when performing the traversal of the CFG of the 2219// caller. We traverse depth-first from the callsite's BB and up to the point we 2220// hit BBs owned by the caller. 2221// The return values will be then used to update the contextual 2222// profile. Note: we only update the "name" and "index" operands in the 2223// instrumentation intrinsics, we leave the hash and total nr of indices as-is, 2224// it's not worth updating those. 2225staticconst std::pair<std::vector<int64_t>, std::vector<int64_t>>
  // We'll allocate a new ID to imported callsite counters and callsites. We're
  // using -1 to indicate a counter we delete. Most likely the entry ID, for
  // example, will be deleted - we don't want 2 IDs in the same BB, and the
  // entry would have been cloned in the callsite's old BB.
  std::vector<int64_t> CalleeCounterMap;
  std::vector<int64_t> CalleeCallsiteMap;
  CalleeCounterMap.resize(CalleeCounters, -1);
  CalleeCallsiteMap.resize(CalleeCallsites, -1);

  auto RewriteInstrIfNeeded = [&](InstrProfIncrementInst &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCounterMap[OldID] == -1)
      CalleeCounterMap[OldID] = CtxProf.allocateNextCounterIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);
    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCallsiteMap[OldID] == -1)
      CalleeCallsiteMap[OldID] = CtxProf.allocateNextCallsiteIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);
    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  std::deque<BasicBlock *> Worklist;
  DenseSet<const BasicBlock *> Seen;
  // We will traverse the BBs starting from the callsite BB. The callsite BB
  // will have at least a BB ID - maybe its own, and in any case the one coming
  // from the cloned function's entry BB. The other BBs we'll start seeing from
  // there on may or may not have BB IDs. BBs with IDs belonging to our caller
  // are definitely not coming from the imported function and form a boundary
  // past which we don't need to traverse anymore. BBs may have no
  // instrumentation (because we originally inserted instrumentation as per
  // MST), in which case we'll traverse past them. An invariant we'll keep is
  // that a BB will have at most 1 BB ID. For example, in the callsite BB, we
  // will delete the callee BB's instrumentation. This doesn't result in
  // information loss: the entry BB of the callee will have the same count as
  // the callsite's BB. At the end of this traversal, all the callee's
  // instrumentation would be mapped into the caller's instrumentation index
  // space. Some of the callee's counters may be deleted (as mentioned, this
  // should result in no loss of information).
  Worklist.push_back(StartBB);
  while (!Worklist.empty()) {
    auto *BB = Worklist.front();
    Worklist.pop_front();
    bool Changed = false;
    auto *BBID = CtxProfAnalysis::getBBInstrumentation(*BB);
    if (BBID) {
      Changed |= RewriteInstrIfNeeded(*BBID);
      // This may be the entry block from the inlined callee, coming into a BB
      // that didn't have instrumentation because of MST decisions. Let's make
      // sure it's placed accordingly. This is a noop elsewhere.
      BBID->moveBefore(BB->getFirstInsertionPt());
    }
    for (auto &I : llvm::make_early_inc_range(*BB)) {
      if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
        if (isa<InstrProfIncrementInstStep>(Inc)) {
          // Step instrumentation is used for select instructions. Inlining may
          // have propagated a constant resulting in the condition of the
          // select being resolved, case in which function cloning resolves the
          // value of the select, and elides the select instruction. If that is
          // the case, the step parameter of the instrumentation will reflect
          // that. We can delete the instrumentation in that case.
          if (isa<Constant>(Inc->getStep())) {
            assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
            Inc->eraseFromParent();
          } else {
            assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
            RewriteInstrIfNeeded(*Inc);
          }
        } else if (Inc != BBID) {
          // If we're here it means that the BB had more than 1 IDs, presumably
          // some coming from the callee. We "made up our mind" to keep the
          // first one (which may or may not have been originally the
          // caller's). All the others are superfluous and we delete them.
          Inc->eraseFromParent();
        }
      } else if (auto *CS = dyn_cast<InstrProfCallsite>(&I)) {
        Changed |= RewriteCallsiteInsIfNeeded(*CS);
      }
    }

    if (!BBID || Changed)
      for (auto *Succ : successors(BB))
        if (Seen.insert(Succ).second)
          Worklist.push_back(Succ);
  }
  assert(
      llvm::all_of(CalleeCounterMap, [&](const auto &V) { return V != 0; }) &&
      "Counter index mapping should be either to -1 or to non-zero index, "
      "because the 0 index corresponds to the entry BB of the caller");
  assert(
      llvm::all_of(CalleeCallsiteMap, [&](const auto &V) { return V != 0; }) &&
      "Callsite index mapping should be either to -1 or to non-zero index, "
      "because there should have been at least a callsite - the inlined one "
      "- which would have had a 0 index.");

  return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
}
// Inline. If successful, update the contextual profile (if a valid one is
// given).
// The contextual profile data is organized in trees, as follows:
//  - each node corresponds to a function
//  - the root of each tree corresponds to an "entrypoint" - e.g.
//    RPC handler for server side
//  - the path from the root to a node is a particular call path
//  - the counters stored in a node are counter values observed in that
//    particular call path ("context")
//  - the edges between nodes are annotated with callsite IDs.
//
// Updating the contextual profile after an inlining means, at a high level,
// copying over the data of the callee, **intentionally without any value
// scaling**, and copying over the callees of the inlined callee.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        PGOContextualProfile &CtxProf,
                                        bool MergeAttributes,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  if (!CtxProf)
    return InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
                          ForwardVarArgsTo);

  auto &Caller = *CB.getCaller();
  auto &Callee = *CB.getCalledFunction();
  auto *StartBB = CB.getParent();

  // Get some preliminary data about the callsite before it might get inlined.
  // Inlining shouldn't delete the callee, but it's cleaner (and low-cost) to
  // get this data upfront and rely less on InlineFunction's behavior.
  const auto CalleeGUID = AssignGUIDPass::getGUID(Callee);
  auto *CallsiteIDIns = CtxProfAnalysis::getCallsiteInstrumentation(CB);
  const auto CallsiteID =
      static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());

  const auto NumCalleeCounters = CtxProf.getNumCounters(Callee);
  const auto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);

  auto Ret = InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
                            ForwardVarArgsTo);
  if (!Ret.isSuccess())
    return Ret;

  // Inlining succeeded, we don't need the instrumentation of the inlined
  // callsite.
  CallsiteIDIns->eraseFromParent();

  // Assigning Maps and then capturing references into it in the lambda because
  // captured structured bindings are a C++20 extension. We do also need a
  // capture here, though.
  const auto IndicesMaps = remapIndices(Caller, StartBB, CtxProf,
                                        NumCalleeCounters, NumCalleeCallsites);
  const uint32_t NewCountersSize = CtxProf.getNumCounters(Caller);

  auto Updater = [&](PGOCtxProfContext &Ctx) {
    const auto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
    assert(
        (Ctx.counters().size() +
             llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==
         NewCountersSize) &&
        "The caller's counters size should have grown by the number of new "
        "distinct counters inherited from the inlined callee.");
    Ctx.resizeCounters(NewCountersSize);
    // If the callsite wasn't exercised in this context, the value of the
    // counters coming from it is 0 - which it is right now, after resizing
    // them - and so we're done.
    auto CSIt = Ctx.callsites().find(CallsiteID);
    if (CSIt == Ctx.callsites().end())
      return;
    auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
    // The callsite was exercised, but not with this callee (so presumably this
    // is an indirect callsite). Again, we're done here.
    if (CalleeCtxIt == CSIt->second.end())
      return;

    // Let's pull in the counter values and the subcontexts coming from the
    // inlined callee.
    auto &CalleeCtx = CalleeCtxIt->second;
    assert(CalleeCtx.guid() == CalleeGUID);

    for (auto I = 0U; I < CalleeCtx.counters().size(); ++I) {
      const int64_t NewIndex = CalleeCounterMap[I];
      if (NewIndex >= 0) {
        assert(NewIndex != 0 && "counter index mapping shouldn't happen to a 0 "
                                "index, that's the caller's entry BB");
        Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];
      }
    }
    for (auto &[I, OtherSet] : CalleeCtx.callsites()) {
      const int64_t NewCSIdx = CalleeCallsiteMap[I];
      if (NewCSIdx >= 0) {
        assert(NewCSIdx != 0 &&
               "callsite index mapping shouldn't happen to a 0 index, the "
               "caller must've had at least one callsite (with such an index)");
        Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
      }
    }
    // We know the traversal is preorder, so it wouldn't have yet looked at the
    // sub-contexts of this context that it's currently visiting. Meaning, the
    // erase below invalidates no iterators.
    auto Deleted = Ctx.callsites().erase(CallsiteID);
    assert(Deleted);
    (void)Deleted;
  };
  CtxProf.update(Updater, Caller);
  return Ret;
}
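// For illustration only (hypothetical shape, not tied to any particular
// profile): if a context tree for caller F had a callsite with ID 2 leading
// to callee G with counters {10, 7}, inlining G at that callsite copies those
// counter values unscaled into F's context at the indices chosen by
// remapIndices, moves G's own sub-callsites up into F's context, and finally
// erases the callsite-2 edge, since that call no longer exists after inlining.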
/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C'
/// now exists in the instruction stream. Similarly this will inline a
/// recursive function by one level.
llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                                        bool MergeAttributes,
                                        AAResults *CalleeAAR,
                                        bool InsertLifetime,
                                        Function *ForwardVarArgsTo) {
  Function *CalledFunc = CB.getCalledFunction();
  Function *Caller = CB.getCaller();
  BasicBlock *OrigBB = CB.getParent();

  // FIXME: we don't inline callbr yet.
  if (isa<CallBrInst>(CB))
    return InlineResult::failure("We don't inline callbr yet.");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration()) // call!
    return InlineResult::failure("external or indirect");

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  Value *ConvergenceControlToken = nullptr;
  if (CB.hasOperandBundles()) {
    for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
      OperandBundleUse OBUse = CB.getOperandBundleAt(i);
      uint32_t Tag = OBUse.getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;
      if (Tag == LLVMContext::OB_clang_arc_attachedcall)
        continue;
      if (Tag == LLVMContext::OB_convergencectrl) {
        ConvergenceControlToken = OBUse.Inputs[0].get();
        continue;
      }
      return InlineResult::failure("unsupported operand bundle");
    }
  }

  // FIXME: The check below is redundant and incomplete. According to spec, if
  // a convergent call is missing a token, then the caller is using
  // uncontrolled convergence. If the callee has an entry intrinsic, then the
  // callee is using controlled convergence, and the call cannot be inlined. A
  // proper implementation of this check requires a whole new analysis that
  // identifies convergence in every function. For now, we skip that and just
  // do this one cursory check. The underlying assumption is that in a compiler
  // flow that fully implements convergence control tokens, there is no mixing
  // of controlled and uncontrolled convergent operations in the whole program.
  if (!ConvergenceControlToken &&
      getConvergenceEntry(CalledFunc->getEntryBlock()))
    return InlineResult::failure(
        "convergent call needs convergencectrl operand");

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CB.doesNotThrow();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return InlineResult::failure("incompatible GC");
  }
  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return InlineResult::failure("incompatible personality");
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isScopedEHPersonality(Personality)) {
      std::optional<OperandBundleUse> ParentFunclet =
          CB.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?
      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHIIt()))
                return InlineResult::failure("catch in cleanup funclet");
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of
          // exceptional funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return InlineResult::failure("SEH in cleanup funclet");
          }
        }
      }
    }
  }
  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && isa<CallInst>(CB)) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst *, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  {
    // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    struct ByValInit {
      Value *Dst;
      Value *Src;
      Type *Ty;
    };
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<ByValInit, 4> ByValInits;

    // When inlining a function that contains noalias scope metadata,
    // this metadata needs to be cloned so that the inlined blocks
    // have different "unique scopes" at every call site.
    // Track the metadata that must be cloned. Do this before other changes to
    // the function, so that we do not get in trouble when inlining caller ==
    // callee.
    ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());

    auto &DL = Caller->getDataLayout();

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    auto AI = CB.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
                                      E = CalledFunc->arg_end();
         I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CB.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
                                        &CB, CalledFunc, IFI,
                                        CalledFunc->getParamAlign(ArgNo));
        if (ActualArg != *AI)
          ByValInits.push_back(
              {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
      }

      VMap[&*I] = ActualArg;
    }

    // TODO: Remove this when users have been updated to the assume bundles.
    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CB, IFI);

    /// Preserve all attributes on of the call and its parameters.

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Insert retainRV/claimRV runtime calls.
    objcarc::ARCInstKind RVCallKind = objcarc::getAttachedARCFunctionKind(&CB);
    if (RVCallKind != objcarc::ARCInstKind::None)
      inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
    // Updated caller/callee profiles only when requested. For sample loader
    // inlining, the context-sensitive inlinee profile doesn't need to be
    // subtracted from callee profile, and the inlined clone also doesn't need
    // to be scaled based on call site count.
    if (IFI.UpdateProfile) {
      if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
        // Update the BFI of blocks cloned into the caller.
        updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                        CalledFunc->front());
    }

    // Inject byval arguments initialization.
    for (ByValInit &Init : ByValInits)
      HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src,
                              Caller->getParent(), &*FirstNewBlock, IFI,
                              CalledFunc);

    std::optional<OperandBundleUse> ParentDeopt =
        CB.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
        if (!ICS)
          continue; // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();
        for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles();
             COBi < COBe; ++COBi) {
          auto ChildOB = ICS->getOperandBundleAt(COBi);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets
          // crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());
          llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
          llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS->getIterator());

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        ICS->replaceAllUsesWith(NewI);
        ICS->eraseFromParent();
      }
    }

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupLineNumbers(Caller, FirstNewBlock, &CB,
                     CalledFunc->getSubprogram() != nullptr);

    // Interpret inlined stores to caller-local variables as assignments.
    trackInlinedStores(FirstNewBlock, Caller->end(), CB);

    // Update DIAssignID metadata attachments and uses so that they are
    // unique to this inlined instance.
    fixupAssignments(FirstNewBlock, Caller->end());

    // Now clone the inlined noalias scope metadata.
    SAMetadataCloner.clone();
    SAMetadataCloner.remap(FirstNewBlock, Caller->end());

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);

    // Clone return attributes on the callsite into the calls within the
    // inlined function which feed into its return value.
    AddReturnAttributes(CB, VMap, InlinedFunctionInfo);

    // Clone attributes on the params of the callsite to calls within the
    // inlined function which use the same param.
    AddParamAndFnBasicAttributes(CB, VMap, InlinedFunctionInfo);

    // Propagate metadata on the callsite if necessary.
    PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<AssumeInst>(&I))
            IFI.GetAssumptionCache(*Caller).registerAssumption(II);

    if (ConvergenceControlToken) {
      IntrinsicInst *IntrinsicCall = getConvergenceEntry(*FirstNewBlock);
      if (IntrinsicCall) {
        IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
        IntrinsicCall->eraseFromParent();
      }
    }
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
                              E = FirstNewBlock->end();
         I != E;) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI)
        continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             !cast<AllocaInst>(I)->use_empty() &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
                                     AI->getIterator(), I);
    }
  }
  SmallVector<Value *, 4> VarArgsToForward;
  SmallVector<AttributeSet, 4> VarArgsAttrs;
  for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
       i < CB.arg_size(); i++) {
    VarArgsToForward.push_back(CB.getArgOperand(i));
    VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(&CB))
      CallSiteTailKind = CI->getTailCallKind();

    // For inlining purposes, the "notail" marker is the same as no marker.
    if (CallSiteTailKind == CallInst::TCK_NoTail)
      CallSiteTailKind = CallInst::TCK_None;

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : llvm::make_early_inc_range(*BB)) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // Forward varargs from inlined call site to calls to the
        // ForwardVarArgsTo function, if requested, and to musttail calls.
        if (!VarArgsToForward.empty() &&
            ((ForwardVarArgsTo &&
              CI->getCalledFunction() == ForwardVarArgsTo) ||
             CI->isMustTailCall())) {
          // Collect attributes for non-vararg parameters.
          AttributeList Attrs = CI->getAttributes();
          SmallVector<AttributeSet, 8> ArgAttrs;
          if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
            for (unsigned ArgNo = 0;
                 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
              ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
          }

          // Add VarArg attributes.
          ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
          Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
                                     Attrs.getRetAttrs(), ArgAttrs);
          // Add VarArgs to existing parameters.
        }

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        //
        // Inlined notail calls should remain notail calls.
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        if (ChildTCK != CallInst::TCK_NoTail)
          ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();
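        // For illustration (hypothetical): inlining a plain call to 'g' into
        // 'f' means CallSiteTailKind is TCK_None, so the std::min above clears
        // any 'musttail' marker on g's cloned calls entirely, matching the
        // third row of the table above.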
        // Call sites inlined through a 'nounwind' call site should be
        // 'nounwind' as well. However, avoid marking call sites explicitly
        // where possible. This helps expose more opportunities for CSE after
        // inlining, commonly when the callee is an intrinsic.
        if (MarkNoUnwind && !CI->doesNotThrow())
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  // We need to insert lifetime intrinsics even at O0 to avoid invalid
  // access caused by multithreaded coroutines. The check
  // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
  if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
      !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
    for (AllocaInst *AI : IFI.StaticAllocas) {
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (!AllocaTypeSize.isScalable() &&
            AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
            std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
                AllocaTypeSize.getFixedValue())
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
      }
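      // For illustration (hypothetical numbers): an alloca of type [4 x i64]
      // (32 bytes) with an array size of 10 gets a lifetime size of 320
      // bytes; the guard above only skips the computation for scalable types
      // and for products that would overflow uint64_t.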
      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateStackSave("savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return. The return will restore the stack pointer.
      if (InlinedMustTailCalls &&
          RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateStackRestore(SavedPtr);
    }
  }
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(&CB)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    BasicBlock::iterator FirstNonPHI = UnwindDest->getFirstNonPHIIt();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the
  // callsite's EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to inlined call sites.
      PropagateOperandBundles(BB, CallSiteEHPad);

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does. Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet);

      BasicBlock::iterator I = BB->getFirstNonPHIIt();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }
  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Return set,
    // so that the control flow from those returns does not get merged into the
    // caller (but terminate it instead). If the caller's return type does not
    // match the callee's return type, we also need to change the return type
    // of the intrinsic.
    if (Caller->getReturnType() == CB.getType()) {
      llvm::erase_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
    } else {
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall)
          continue;

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        NewDeoptIntrinsic->setCallingConv(
            DeoptCall->getCalledFunction()->getCallingConv());

        SmallVector<Value *, 4> CallArgs(DeoptCall->args());
        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(RI);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        // Since the ret type is changed, remove the incompatible attributes.
        (void)NewDeoptCall;
        // Leave behind the normal returns so we can merge control flow.
      }
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead. Calls to intrinsics are not
  // collected because they are not inlineable.
  if (InlinedFunctionInfo.ContainsCalls) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (!(CB->getCalledFunction() &&
                CB->getCalledFunction()->isIntrinsic()))
            IFI.InlinedCallSites.push_back(CB);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
                   FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->back().eraseFromParent();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(&CB))
      BranchInst::Create(II->getNormalDest(), CB.getIterator());

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!CB.use_empty()) {
      ReturnInst *R = Returns[0];
      if (&CB == R->getReturnValue())
        CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    CB.eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    if (MergeAttributes)
      AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);

    // We are now done with the inlining.
    return InlineResult::success();
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest =
        BranchInst::Create(II->getNormalDest(), CB.getIterator());

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");
else {
// It's a call 3249// If this is a call instruction, we need to split the basic block that 3250// the call lives in. 3253 CalledFunc->
getName() +
".exit");
3257// Copy original BB's block frequency to AfterCallBB 3262// Change the branch that used to go to AfterCallBB to branch to the first 3263// basic block of the inlined function. 3267"splitBasicBlock broken!");
3270// Now that the function is correct, make it a little bit nicer. In 3271// particular, move the basic blocks inserted from the end of the function 3272// into the space made by splitting the source basic block. 3273 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
3276// Handle all of the return instructions that we just cloned in, and eliminate 3277// any users of the original call/invoke instruction. 3281if (Returns.
size() > 1) {
3282// The PHI node should go at the front of the new basic block to merge all 3283// possible incoming values. 3286PHI->insertBefore(AfterCallBB->
begin());
3287// Anything that used the result of the function call should now use the 3288// PHI node as their operand. 3292// Loop over all of the return instructions adding entries to the PHI node 3297"Ret value not consistent in function!");
3298PHI->addIncoming(RI->getReturnValue(), RI->
getParent());
3302// Add a branch to the merge points and remove return instructions. 3310// We need to set the debug location to *somewhere* inside the 3311// inlined function. The line number may be nonsensical, but the 3312// instruction will at least be associated with the right 3314if (CreatedBranchToNormalDest)
3316 }
elseif (!Returns.
empty()) {
3317// Otherwise, if there is exactly one return value, just replace anything 3318// using the return value of the call with the computed value. 3320if (&CB == Returns[0]->getReturnValue())
3326// Update PHI nodes that use the ReturnBB to use the AfterCallBB. 3327BasicBlock *ReturnBB = Returns[0]->getParent();
3330// Splice the code from the return block into the block that it will return 3331// to, which contains the code that was after the call. 3332 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
3334if (CreatedBranchToNormalDest)
3337// Delete the return instruction now and empty ReturnBB now. 3338 Returns[0]->eraseFromParent();
3341// No returns, but something is using the return value of the call. Just 3346// Since we are now done with the Call/Invoke, we can delete it. 3349// If we inlined any musttail calls and the original return is now 3350// unreachable, delete it. It can only contain a bitcast and ret. 3351if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
3354// We should always be able to fold the entry block of the function into the 3355// single predecessor of the block... 3356assert(cast<BranchInst>(Br)->isUnconditional() &&
"splitBasicBlock broken!");
3357BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3359// Splice the code entry block into calling block, right before the 3360// unconditional branch. 3364// Remove the unconditional branch. 3367// Now we can remove the CalleeEntry block, which is now empty. 3370// If we inserted a phi node, check to see if it has a single value (e.g. all 3371// the entries are the same or undef). If so, remove the PHI so it doesn't 3372// block other optimizations. 3376auto &
DL = Caller->getDataLayout();
3378PHI->replaceAllUsesWith(V);
3379PHI->eraseFromParent();
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file contains the simple types necessary to represent the attributes associated with functions a...
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
This file contains the declarations for metadata subclasses.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
This file contains the declarations for profiling metadata utility functions.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
static uint64_t getGUID(const Function &F)
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
bool hasAttributes() const
Return true if the builder has IR-level attributes.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Remove the specified attribute from this set.
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getblockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
This is the shared class of boolean and integer constants.
This class represents a range of values.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
DILocation * get() const
Get the underlying DILocation.
MDNode * getScope() const
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Implements a dense probed hash-table based set.
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
bool UpdateProfile
Update profile for callee as well as cloned version.
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
This represents the llvm.instrprof.callsite intrinsic.
This represents the llvm.instrprof.increment intrinsic.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
A wrapper class for inspecting calls to intrinsic functions.
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Root of the metadata hierarchy.
A Module instance is used to store all the information related to an LLVM module.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
The instrumented contextual profile, produced by the CtxProfAnalysis.
void update(Visitor, const Function &F)
uint32_t getNumCounters(const Function &F) const
uint32_t allocateNextCounterIndex(const Function &F)
uint32_t getNumCallsites(const Function &F) const
uint32_t allocateNextCallsiteIndex(const Function &F)
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
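A minimal sketch of the PHINode interface above, assuming a hypothetical join block with exactly two predecessors:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static PHINode *mergeAtJoin(BasicBlock *Join, Value *FromA, BasicBlock *A,
                            Value *FromB, BasicBlock *B) {
  // Reserve space for the two incoming edges up front.
  PHINode *PN = PHINode::Create(FromA->getType(), /*NumReservedValues=*/2,
                                "merge", Join->begin());
  PN->addIncoming(FromA, A);
  PN->addIncoming(FromB, B);
  return PN;
}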
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Analysis providing profile information.
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
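A minimal, self-contained sketch of the SmallPtrSet insert/count interface (the helper is hypothetical): count the distinct values in a list.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Value.h"
using namespace llvm;

static unsigned countUnique(ArrayRef<Value *> Vals) {
  SmallPtrSet<Value *, 8> Seen;   // no heap allocation for up to 8 elements
  unsigned Unique = 0;
  for (Value *V : Vals)
    if (Seen.insert(V).second)    // .second is true only for newly inserted keys
      ++Unique;
  return Unique;
}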
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
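A minimal sketch of the replaceAllUsesWith/takeName idiom: swap an instruction for an equivalent value, keep the old name, and delete the dead instruction. The helper is hypothetical.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"
#include <cassert>
using namespace llvm;

static void replaceAndErase(Instruction &Old, Value &New) {
  assert(&New != &Old && "cannot replace a value with itself");
  New.takeName(&Old);             // preserve the old SSA name on the replacement
  Old.replaceAllUsesWith(&New);   // rewrite every use to point at New
  Old.eraseFromParent();          // unlink and delete the now-dead instruction
}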
std::pair< iterator, bool > insert(const ValueT &V)
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
const ParentTy * getParent() const
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
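A minimal sketch of Intrinsic::getOrInsertDeclaration (the newer spelling of what used to be Intrinsic::getDeclaration), assuming an IRBuilder already positioned at the desired insertion point; Cond must be an i1 value.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
using namespace llvm;

static CallInst *emitAssume(IRBuilder<> &Builder, Module *M, Value *Cond) {
  // llvm.assume is not overloaded, so no type arguments are needed.
  Function *AssumeFn = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
  return Builder.CreateCall(AssumeFn, {Cond});
}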
bool match(Val *V, const Pattern &P)
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
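A minimal sketch of the match/m_ImmConstant combination: recognize an add whose second operand is an immediate constant (the predicate is hypothetical).

#include "llvm/IR/Constants.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isAddOfImmediate(Value *V) {
  Value *LHS;
  Constant *RHS;
  return match(V, m_Add(m_Value(LHS), m_ImmConstant(RHS)));
}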
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
initializer< Ty > init(const Ty &Val)
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
auto successors(const MachineBasicBlock *BB)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
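A minimal sketch of make_early_inc_range: delete trivially dead instructions while walking a block, without invalidating the loop iterator (the cleanup helper is hypothetical).

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static void dropTrivialDeadCode(BasicBlock &BB) {
  for (Instruction &I : make_early_inc_range(BB))
    // Skip terminators and EH pads; erase only unused, side-effect-free
    // instructions. The iterator was already advanced, so erasing is safe.
    if (!I.isTerminator() && !I.isEHPad() && I.use_empty() &&
        !I.mayHaveSideEffects())
      I.eraseFromParent();
}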
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
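A minimal sketch of the simplifyInstruction pattern, paired with the getDataLayout accessor listed earlier: fold an instruction to an existing value and drop it (hypothetical helper).

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool trySimplify(Instruction *I) {
  // Returns an existing value the instruction is equivalent to, or null.
  if (Value *V = simplifyInstruction(I, SimplifyQuery(I->getDataLayout()))) {
    I->replaceAllUsesWith(V);
    I->eraseFromParent();
    return true;
  }
  return false;
}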
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
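A minimal sketch of the two alignment helpers: query the provable alignment first, then ask getOrEnforceKnownAlignment to raise it if possible. The helper is hypothetical; the Instruction supplies context for assumptions and dominance.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

static bool hasOrCanGetAlignment(Value *Ptr, Align Wanted,
                                 const DataLayout &DL, Instruction *CtxI) {
  if (getKnownAlignment(Ptr, DL, CtxI) >= Wanted)
    return true;                                      // already provably aligned
  // May bump the alignment of an alloca or global to reach the preference.
  return getOrEnforceKnownAlignment(Ptr, Wanted, DL, CtxI) >= Wanted;
}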
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
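A minimal sketch of driving CloneAndPruneFunctionInto: clone a function into a fresh declaration with arguments mapped one-to-one. The ".clone" naming and the omission of attribute copying are illustrative simplifications.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

static Function *clonePruned(Function &OldFunc) {
  Function *NewFunc =
      Function::Create(OldFunc.getFunctionType(), OldFunc.getLinkage(),
                       OldFunc.getName() + ".clone", OldFunc.getParent());
  // Every formal argument of the source must be mapped before cloning.
  ValueToValueMapTy VMap;
  auto NewArg = NewFunc->arg_begin();
  for (Argument &OldArg : OldFunc.args())
    VMap[&OldArg] = &*NewArg++;

  SmallVector<ReturnInst *, 8> Returns;
  ClonedCodeInfo CodeInfo;   // reports calls/dynamic allocas seen while cloning
  CloneAndPruneFunctionInto(NewFunc, &OldFunc, VMap,
                            /*ModuleLevelChanges=*/false, Returns, ".clone",
                            &CodeInfo);
  return NewFunc;
}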
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
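A minimal sketch combining classifyEHPersonality with the isScopedEHPersonality predicate listed earlier; the header path assumes a recent tree where EHPersonalities.h lives under llvm/IR.

#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
using namespace llvm;

static bool usesScopedEH(const Function &F) {
  if (!F.hasPersonalityFn())
    return false;   // no personality function, so no EH scheme to classify
  EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
  return isScopedEHPersonality(Pers);   // catchswitch/catchpad/cleanuppad IR
}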
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
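A minimal sketch of calling the entry point above from a pass: wire up InlineFunctionInfo, inline one call site, and report the failure reason if inlining was refused. GetAC stands in for whatever AssumptionCache getter the caller has available.

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;

static bool inlineOneCall(CallBase &CB,
                          function_ref<AssumptionCache &(Function &)> GetAC) {
  InlineFunctionInfo IFI(GetAC);
  InlineResult Res = InlineFunction(CB, IFI, /*MergeAttributes=*/true);
  if (!Res.isSuccess()) {
    errs() << "not inlined: " << Res.getFailureReason() << "\n";
    return false;
  }
  // IFI.InlinedCallSites now lists the call sites cloned into the caller,
  // and IFI.StaticAllocas the static allocas copied from the callee.
  return true;
}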
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool pred_empty(const BasicBlock *BB)
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scale the profile data attached to 'I' using the ratio of S/T.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
static Instruction * tryGetVTableInstruction(CallBase *CB)
Helper struct for trackAssignments, below.