LLVM 20.0.0git
InlineFunction.cpp
//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CtxProfAnalysis.h"
#include "llvm/Analysis/IndirectCallVisitor.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryProfileInfo.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <deque>
#include <iterator>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "inline-function"

using namespace llvm;
using namespace llvm::memprof;
using ProfileCount = Function::ProfileCount;

static cl::opt<bool>
    EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                            cl::Hidden,
                            cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
    UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
                        cl::init(true),
                        cl::desc("Use the llvm.experimental.noalias.scope.decl "
                                 "intrinsic during inlining."));

// Disabled by default, because the added alignment assumptions may increase
// compile-time and block optimizations. This option is not suitable for use
// with frontends that emit comprehensive parameter alignment annotations.
static cl::opt<bool> PreserveAlignmentAssumptions(
    "preserve-alignment-assumptions-during-inlining", cl::init(false),
    cl::Hidden,
    cl::desc("Convert align attributes to assumptions during inlining."));

static cl::opt<unsigned> InlinerAttributeWindow(
    "max-inst-checked-for-throw-during-inlining", cl::Hidden,
    cl::desc("the maximum number of instructions analyzed for may throw during "
             "attribute inference in inlined body"),
    cl::init(4));

namespace {

  /// A class for recording information about inlining a landing pad.
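  /// As an illustrative sketch (hypothetical IR, not from a specific test):
  /// when inlining @callee through
  ///
  ///   invoke void @callee() to label %cont unwind label %lpad
  ///
  /// every may-throw call cloned from @callee must be rewritten to unwind to
  /// %lpad, and each 'resume' in the inlined body must branch there, feeding
  /// its exception value into a PHI merged with the caller's landingpad.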
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // end anonymous namespace

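/// Scan BB for a convergence control intrinsic that is an entry
/// (llvm.experimental.convergence.entry) and return it, or nullptr if none.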
static IntrinsicInst *getConvergenceEntry(BasicBlock &BB) {
  BasicBlock::iterator It = BB.getFirstNonPHIIt();
  while (It != BB.end()) {
    if (auto *IntrinsicCall = dyn_cast<ConvergenceControlInst>(It)) {
      if (IntrinsicCall->isEntry()) {
        return IntrinsicCall;
      }
    }
    It = std::next(It);
  }
  return nullptr;
}

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body");
    InnerPHI->insertBefore(InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI =
    PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
  InnerEHValuesPHI->insertBefore(InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = &*CatchSwitch->getUnwindDest()->getFirstNonPHIIt();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad =
              cast<CatchPadInst>(&*HandlerBlock->getFirstNonPHIIt());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = &*RetUnwindDest->getFirstNonPHIIt();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = &*Invoke->getUnwindDest()->getFirstNonPHIIt();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
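///
/// Schematically, for a hypothetical inlinee fragment such as:
///
///   %cleanup = cleanuppad within none []
///   ...
///   cleanupret from %cleanup unwind label %outer.pad
///
/// the cleanupret pins %cleanup's unwind dest to the EH pad in %outer.pad,
/// while 'cleanupret from %cleanup unwind to caller' would instead yield
/// ConstantTokenNone.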
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = &*HandlerBlock->getFirstNonPHIIt();
        for (User *U : CatchPad->users()) {
          assert((!isa<InvokeInst>(U) ||
                  (getParentPad(&*cast<InvokeInst>(U)
                                     ->getUnwindDest()
                                     ->getFirstNonPHIIt()) == CatchPad)) &&
                 "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert(
            (!isa<InvokeInst>(U) ||
             (getParentPad(
                  &*cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHIIt()) ==
              UselessPad)) &&
            "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
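///
/// For example (sketch only), a cloned block containing
///
///   call void @may_throw()
///
/// is split after the call, which is rewritten to
///
///   invoke void @may_throw() to label %split unwind label %unwind.edge
///
/// so that exceptions thrown in the inlined body reach the caller's handler.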
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (Instruction &I : llvm::make_early_inc_range(*BB)) {
    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(&I);

    if (!CI || CI->doesNotThrow())
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHIIt()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (PHINode &PHI : UnwindDest->phis()) {
    // Save the value to use for this edge.
    UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI->getIterator());
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    BasicBlock::iterator I = BB->getFirstNonPHIIt();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch->getIterator());
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(&*I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

static bool haveCommonPrefix(MDNode *MIBStackContext,
                             MDNode *CallsiteStackContext) {
  assert(MIBStackContext->getNumOperands() > 0 &&
         CallsiteStackContext->getNumOperands() > 0);
  // Because of the context trimming performed during matching, the callsite
  // context could have more stack ids than the MIB. We match up to the end of
  // the shortest stack context.
  for (auto MIBStackIter = MIBStackContext->op_begin(),
            CallsiteStackIter = CallsiteStackContext->op_begin();
       MIBStackIter != MIBStackContext->op_end() &&
       CallsiteStackIter != CallsiteStackContext->op_end();
       MIBStackIter++, CallsiteStackIter++) {
    auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
    auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
    assert(Val1 && Val2);
    if (Val1->getZExtValue() != Val2->getZExtValue())
      return false;
  }
  return true;
}

static void removeMemProfMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_memprof, nullptr);
}

static void removeCallsiteMetadata(CallBase *Call) {
  Call->setMetadata(LLVMContext::MD_callsite, nullptr);
}

static void updateMemprofMetadata(CallBase *CI,
                                  const std::vector<Metadata *> &MIBList) {
  assert(!MIBList.empty());
  // Remove existing memprof, which will either be replaced or may not be needed
  // if we are able to use a single allocation type function attribute.
  removeMemProfMetadata(CI);
  CallStackTrie CallStack;
  for (Metadata *MIB : MIBList)
    CallStack.addCallStack(cast<MDNode>(MIB));
  bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
  assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
  if (!MemprofMDAttached)
    // If we used a function attribute remove the callsite metadata as well.
    removeCallsiteMetadata(CI);
}

// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
// the call that was inlined.
static void propagateMemProfHelper(const CallBase *OrigCall,
                                   CallBase *ClonedCall,
                                   MDNode *InlinedCallsiteMD) {
  MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
  MDNode *ClonedCallsiteMD = nullptr;
  // Check if the call originally had callsite metadata, and update it for the
  // new call in the inlined body.
  if (OrigCallsiteMD) {
    // The cloned call's context is now the concatenation of the original call's
    // callsite metadata and the callsite metadata on the call where it was
    // inlined.
    ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
    ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
  }

  // Update any memprof metadata on the cloned call.
  MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
  if (!OrigMemProfMD)
    return;
  // We currently expect that allocations with memprof metadata also have
  // callsite metadata for the allocation's part of the context.
  assert(OrigCallsiteMD);

  // New call's MIB list.
  std::vector<Metadata *> NewMIBList;

  // For each MIB metadata, check if its call stack context starts with the
  // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
  // the inlined body. If not, it stays on the out-of-line original call.
  for (auto &MIBOp : OrigMemProfMD->operands()) {
    MDNode *MIB = dyn_cast<MDNode>(MIBOp);
    // Stack is first operand of MIB.
    MDNode *StackMD = getMIBStackNode(MIB);
    assert(StackMD);
    // See if the new cloned callsite context matches this profiled context.
    if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
      // Add it to the cloned call's MIB list.
      NewMIBList.push_back(MIB);
  }
  if (NewMIBList.empty()) {
    removeMemProfMetadata(ClonedCall);
    removeCallsiteMetadata(ClonedCall);
    return;
  }
  if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
    updateMemprofMetadata(ClonedCall, NewMIBList);
}

// Update memprof related metadata (!memprof and !callsite) based on the
// inlining of Callee into the callsite at CB. The updates include merging the
// inlined callee's callsite metadata with that of the inlined call,
// and moving the subset of any memprof contexts to the inlined callee
// allocations if they match the new inlined call stack.
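// Schematically (hypothetical metadata, shape only), an allocation may carry:
//   !memprof !0, !callsite !3
//   !0 = !{!1}               ; list of MIB nodes
//   !1 = !{!2, !"notcold"}   ; MIB: stack context node + allocation type
//   !2 = !{i64 123, i64 456} ; profiled call stack as stack ids
//   !3 = !{i64 123}          ; this call's own callsite context
// After inlining, a clone's !callsite becomes the concatenation of the callee
// and caller contexts, and each MIB is kept only where its stack still matches.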
static void
propagateMemProfMetadata(Function *Callee, CallBase &CB,
                         bool ContainsMemProfMetadata,
                         const ValueMap<const Value *, WeakTrackingVH> &VMap) {
  MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
  // Only need to update if the inlined callsite had callsite metadata, or if
  // there was any memprof metadata inlined.
  if (!CallsiteMD && !ContainsMemProfMetadata)
    return;

  // Propagate metadata onto the cloned calls in the inlined callee.
  for (const auto &Entry : VMap) {
    // See if this is a call that has been inlined and remapped, and not
    // simplified away in the process.
    auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
    auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
    if (!OrigCall || !ClonedCall)
      continue;
    // If the inlined callsite did not have any callsite metadata, then it isn't
    // involved in any profiled call contexts, and we can remove any memprof
    // metadata on the cloned call.
    if (!CallsiteMD) {
      removeMemProfMetadata(ClonedCall);
      removeCallsiteMetadata(ClonedCall);
      continue;
    }
    propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
  }
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access,
/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
/// be propagated to all memory-accessing cloned instructions.
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
                                      Function::iterator FEnd) {
  MDNode *MemParallelLoopAccess =
      CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
  MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
  if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
    return;

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // This metadata is only relevant for instructions that access memory.
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MemParallelLoopAccess) {
        // TODO: This probably should not overwrite MemParallelLoopAccess.
        MemParallelLoopAccess = MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
            MemParallelLoopAccess);
        I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
                      MemParallelLoopAccess);
      }

      if (AccessGroup)
        I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
            I.getMetadata(LLVMContext::MD_access_group), AccessGroup));

      if (AliasScope)
        I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));

      if (NoAlias)
        I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            I.getMetadata(LLVMContext::MD_noalias), NoAlias));
    }
  }
}

/// Bundle operands of the inlined function must be added to inlined call sites.
static void PropagateOperandBundles(Function::iterator InlinedBB,
                                    Instruction *CallSiteEHPad) {
  for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
    CallBase *I = dyn_cast<CallBase>(&II);
    if (!I)
      continue;
    // Skip call sites which already have a "funclet" bundle.
    if (I->getOperandBundle(LLVMContext::OB_funclet))
      continue;
    // Skip call sites which are nounwind intrinsics (as long as they don't
    // lower into regular function calls in the course of IR transformations).
    auto *CalledFn =
        dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
    if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
        !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
      continue;

    SmallVector<OperandBundleDef, 1> OpBundles;
    I->getOperandBundlesAsDefs(OpBundles);
    OpBundles.emplace_back("funclet", CallSiteEHPad);

    Instruction *NewInst = CallBase::Create(I, OpBundles, I->getIterator());
    NewInst->takeName(I);
    I->replaceAllUsesWith(NewInst);
    I->eraseFromParent();
  }
}

namespace {
/// Utility for cloning !noalias and !alias.scope metadata. When a code region
/// using scoped alias metadata is inlined, the aliasing relationships may not
/// hold between the two versions. It is necessary to create a deep clone of
/// the metadata, putting the two versions in separate scope domains.
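///
/// For instance (sketch only): if a callee tags accesses with scope !1 in
/// domain !0 and is inlined twice, each inlined copy receives freshly cloned
/// scope and domain nodes, so !noalias metadata from one copy cannot
/// spuriously apply to accesses of the other.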
class ScopedAliasMetadataDeepCloner {
  using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
  SetVector<const MDNode *> MD;
  MetadataMap MDMap;
  void addRecursiveMetadataUses();

public:
  ScopedAliasMetadataDeepCloner(const Function *F);

  /// Create a new clone of the scoped alias metadata, which will be used by
  /// subsequent remap() calls.
  void clone();

  /// Remap instructions in the given range from the original to the cloned
  /// metadata.
  void remap(Function::iterator FStart, Function::iterator FEnd);
};
} // namespace

ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
    const Function *F) {
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);

      // We also need to clone the metadata in noalias intrinsics.
      if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        MD.insert(Decl->getScopeList());
    }
  }
  addRecursiveMetadataUses();
}

void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (const Metadata *Op : M->operands())
      if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
        if (MD.insert(OpMD))
          Queue.push_back(OpMD);
  }
}

void ScopedAliasMetadataDeepCloner::clone() {
  assert(MDMap.empty() && "clone() already called ?");

  SmallVector<TempMDTuple, 16> DummyNodes;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), {}));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  SmallVector<Metadata *, 4> NewOps;
  for (const MDNode *I : MD) {
    for (const Metadata *Op : I->operands()) {
      if (const MDNode *M = dyn_cast<MDNode>(Op))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(Op));
    }

    MDNode *NewM = MDNode::get(I->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
    NewOps.clear();
  }
}

void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
                                          Function::iterator FEnd) {
  if (MDMap.empty())
    return; // Nothing to do.

  for (BasicBlock &BB : make_range(FStart, FEnd)) {
    for (Instruction &I : BB) {
      // TODO: The null checks for the MDMap.lookup() results should no longer
      // be necessary.
      if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_alias_scope, MNew);

      if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
        if (MDNode *MNew = MDMap.lookup(M))
          I.setMetadata(LLVMContext::MD_noalias, MNew);

      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
          Decl->setScopeList(MNew);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
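///
/// Sketch (hypothetical IR): after inlining 'void @f(ptr noalias %p, ptr %q)',
/// a fresh scope !1 is created for %p; cloned accesses based on %p are tagged
/// '!alias.scope !{!1}', and accesses provably not based on %p are tagged
/// '!noalias !{!1}'.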
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR,
                                  ClonedCodeInfo &InlinedFunctionInfo) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CB.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = std::string(CalledFunc->getName());
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));

    if (UseNoAliasIntrinsic) {
      // Introduce a llvm.experimental.noalias.scope.decl for the noalias
      // argument.
      MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
      auto *NoAliasDecl =
          IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
      // Ignore the result for now. The result will be used when the
      // llvm.noalias intrinsic is introduced.
      (void)NoAliasDecl;
    }
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (const auto *Call = dyn_cast<CallBase>(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (Call->doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);

          // We'll retain this knowledge without additional metadata.
          if (ME.onlyAccessesInaccessibleMem())
            continue;

          if (ME.onlyAccessesArgPointees())
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : Call->args()) {
          // Only care about pointer arguments. If a noalias argument is
          // accessed through a non-pointer argument, it must be captured
          // first (e.g. via ptrtoint), and we protect against captures below.
          if (!Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, it might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      for (const Value *V : PtrArgs) {
        SmallVector<const Value *, 4> Objects;
        getUnderlyingObjects(V, Objects, /* LI = */ nullptr);

        for (const Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
           UsesUnknownObject = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        if (isEscapeSource(V)) {
          // An escape source can only alias with a noalias argument if it has
          // been captured beforehand.
          RequiresNoCaptureBefore = true;
        } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
          // If this is neither an escape source, nor some identified object
          // (which cannot directly alias a noalias argument), nor some other
          // argument (which, by definition, also cannot alias a noalias
          // argument), conservatively do not make any assumptions.
          UsesUnknownObject = true;
        }
      }

      // Nothing we can do if the used underlying object cannot be reliably
      // determined.
      if (UsesUnknownObject)
        continue;

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        RequiresNoCaptureBefore = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (ObjSet.contains(A))
          continue; // May be based on a noalias argument.

        // It might be tempting to skip the PointerMayBeCapturedBefore check if
        // A->hasNoCaptureAttr() is true, but this is incorrect because
        // nocapture only guarantees that no copies outlive the function, not
        // that the value cannot be locally captured.
        if (!RequiresNoCaptureBefore ||
            !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
                                        /* StoreCaptures */ false, I, &DT))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
                                                   ReturnInst *End) {

  assert(Begin->getParent() == End->getParent() &&
         "Expected to be in same basic block!");
  auto BeginIt = Begin->getIterator();
  assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
  return !llvm::isGuaranteedToTransferExecutionToSuccessor(
      ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
}

// Add attributes from CB params and Fn attributes that can always be propagated
// to the corresponding argument / inner callbases.
static void AddParamAndFnBasicAttributes(const CallBase &CB,
                                         ValueToValueMapTy &VMap,
                                         ClonedCodeInfo &InlinedFunctionInfo) {
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  // Collect valid attributes for all params.
  SmallVector<AttrBuilder> ValidObjParamAttrs, ValidExactParamAttrs;
  bool HasAttrToPropagate = false;

  // Attributes we can only propagate if the exact parameter is forwarded.
  // We can propagate both poison generating and UB generating attributes
  // without any extra checks. The only attribute that is tricky to propagate
  // is `noundef` (skipped for now) as that can create new UB where previous
  // behavior was just using a poison value.
  static const Attribute::AttrKind ExactAttrsToPropagate[] = {
      Attribute::Dereferenceable, Attribute::DereferenceableOrNull,
      Attribute::NonNull, Attribute::Alignment, Attribute::Range};
1387
1388for (unsignedI = 0, E = CB.arg_size();I < E; ++I) {
1389 ValidObjParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1390 ValidExactParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1391// Access attributes can be propagated to any param with the same underlying
1392// object as the argument.
1393if (CB.paramHasAttr(I, Attribute::ReadNone))
1394 ValidObjParamAttrs.back().addAttribute(Attribute::ReadNone);
1395if (CB.paramHasAttr(I, Attribute::ReadOnly))
1396 ValidObjParamAttrs.back().addAttribute(Attribute::ReadOnly);
1397
1398for (Attribute::AttrKind AK : ExactAttrsToPropagate) {
1399Attribute Attr = CB.getParamAttr(I, AK);
1400if (Attr.isValid())
1401 ValidExactParamAttrs.back().addAttribute(Attr);
1402 }
1403
1404 HasAttrToPropagate |= ValidObjParamAttrs.back().hasAttributes();
1405 HasAttrToPropagate |= ValidExactParamAttrs.back().hasAttributes();
1406 }
1407
1408// Won't be able to propagate anything.
1409if (!HasAttrToPropagate)
1410return;
1411
1412for (BasicBlock &BB : *CalledFunction) {
1413for (Instruction &Ins : BB) {
1414constauto *InnerCB = dyn_cast<CallBase>(&Ins);
1415if (!InnerCB)
1416continue;
1417auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));
1418if (!NewInnerCB)
1419continue;
1420// The InnerCB might have be simplified during the inlining
1421// process which can make propagation incorrect.
1422if (InlinedFunctionInfo.isSimplified(InnerCB, NewInnerCB))
1423continue;
1424
1425AttributeList AL = NewInnerCB->getAttributes();
1426for (unsignedI = 0, E = InnerCB->arg_size();I < E; ++I) {
1427// It's unsound or requires special handling to propagate
1428// attributes to byval arguments. Even if CalledFunction
1429// doesn't e.g. write to the argument (readonly), the call to
1430// NewInnerCB may write to its by-value copy.
1431if (NewInnerCB->paramHasAttr(I, Attribute::ByVal))
1432continue;
1433
1434// Don't bother propagating attrs to constants.
1435if (match(NewInnerCB->getArgOperand(I),
1436llvm::PatternMatch::m_ImmConstant()))
1437continue;
1438
1439// Check if the underlying value for the parameter is an argument.
1440constArgument *Arg = dyn_cast<Argument>(InnerCB->getArgOperand(I));
1441unsigned ArgNo;
1442if (Arg) {
1443 ArgNo = Arg->getArgNo();
1444// For dereferenceable, dereferenceable_or_null, align, etc...
1445// we don't want to propagate if the existing param has the same
1446// attribute with "better" constraints. So remove from the
1447// new AL if the region of the existing param is larger than
1448// what we can propagate.
1449AttrBuilder NewAB{
1450 Context,AttributeSet::get(Context, ValidExactParamAttrs[ArgNo])};
1451if (AL.getParamDereferenceableBytes(I) >
1452 NewAB.getDereferenceableBytes())
1453 NewAB.removeAttribute(Attribute::Dereferenceable);
1454if (AL.getParamDereferenceableOrNullBytes(I) >
1455 NewAB.getDereferenceableOrNullBytes())
1456 NewAB.removeAttribute(Attribute::DereferenceableOrNull);
1457if (AL.getParamAlignment(I).valueOrOne() >
1458 NewAB.getAlignment().valueOrOne())
1459 NewAB.removeAttribute(Attribute::Alignment);
1460if (auto ExistingRange = AL.getParamRange(I)) {
1461if (auto NewRange = NewAB.getRange()) {
1462ConstantRange CombinedRange =
1463 ExistingRange->intersectWith(*NewRange);
1464 NewAB.removeAttribute(Attribute::Range);
1465 NewAB.addRangeAttr(CombinedRange);
1466 }
1467 }
1468 AL = AL.addParamAttributes(Context,I, NewAB);
1469 }elseif (NewInnerCB->getArgOperand(I)->getType()->isPointerTy()) {
1470// Check if the underlying value for the parameter is an argument.
1471constValue *UnderlyingV =
1472getUnderlyingObject(InnerCB->getArgOperand(I));
1473 Arg = dyn_cast<Argument>(UnderlyingV);
1474if (!Arg)
1475continue;
1476 ArgNo = Arg->getArgNo();
1477 }else {
1478continue;
1479 }
1480
1481// If so, propagate its access attributes.
1482 AL = AL.addParamAttributes(Context,I, ValidObjParamAttrs[ArgNo]);
1483
1484// We can have conflicting attributes from the inner callsite and
1485// to-be-inlined callsite. In that case, choose the most
1486// restrictive.
1487
1488// readonly + writeonly means we can never deref so make readnone.
1489if (AL.hasParamAttr(I, Attribute::ReadOnly) &&
1490 AL.hasParamAttr(I, Attribute::WriteOnly))
1491 AL = AL.addParamAttribute(Context,I, Attribute::ReadNone);
1492
1493// If have readnone, need to clear readonly/writeonly
1494if (AL.hasParamAttr(I, Attribute::ReadNone)) {
1495 AL = AL.removeParamAttribute(Context,I, Attribute::ReadOnly);
1496 AL = AL.removeParamAttribute(Context,I, Attribute::WriteOnly);
1497 }
1498
1499// Writable cannot exist in conjunction w/ readonly/readnone
1500if (AL.hasParamAttr(I, Attribute::ReadOnly) ||
1501 AL.hasParamAttr(I, Attribute::ReadNone))
1502 AL = AL.removeParamAttribute(Context,I, Attribute::Writable);
1503 }
1504 NewInnerCB->setAttributes(AL);
1505 }
1506 }
1507}
1508
1509// Only allow these white listed attributes to be propagated back to the
1510// callee. This is because other attributes may only be valid on the call
1511// itself, i.e. attributes such as signext and zeroext.
1512
1513// Attributes that are always okay to propagate as if they are violated its
1514// immediate UB.
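// For instance (an illustrative sketch, not taken from a test case): given
//   %r = call dereferenceable(8) noalias ptr @callee()
// the dereferenceable(8) and noalias return attributes may be copied onto
// the inlined call that produces the return value, because violating them
// would already be immediate UB at the original call site.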
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB) {
  AttrBuilder Valid(CB.getContext());
  if (auto DerefBytes = CB.getRetDereferenceableBytes())
    Valid.addDereferenceableAttr(DerefBytes);
  if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
    Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
  if (CB.hasRetAttr(Attribute::NoAlias))
    Valid.addAttribute(Attribute::NoAlias);
  if (CB.hasRetAttr(Attribute::NoUndef))
    Valid.addAttribute(Attribute::NoUndef);
  return Valid;
}

// Attributes that need additional checks as propagating them may change
// behavior or cause new UB.
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB) {
  AttrBuilder Valid(CB.getContext());
  if (CB.hasRetAttr(Attribute::NonNull))
    Valid.addAttribute(Attribute::NonNull);
  if (CB.hasRetAttr(Attribute::Alignment))
    Valid.addAlignmentAttr(CB.getRetAlign());
  if (std::optional<ConstantRange> Range = CB.getRange())
    Valid.addRangeAttr(*Range);
  return Valid;
}

static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap,
                                ClonedCodeInfo &InlinedFunctionInfo) {
  AttrBuilder ValidUB = IdentifyValidUBGeneratingAttributes(CB);
  AttrBuilder ValidPG = IdentifyValidPoisonGeneratingAttributes(CB);
  if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
    return;
  auto *CalledFunction = CB.getCalledFunction();
  auto &Context = CalledFunction->getContext();

  for (auto &BB : *CalledFunction) {
    auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!RI || !isa<CallBase>(RI->getOperand(0)))
      continue;
    auto *RetVal = cast<CallBase>(RI->getOperand(0));
    // Check that the cloned RetVal exists and is a call, otherwise we cannot
    // add the attributes on the cloned RetVal. Simplification during inlining
    // could have transformed the cloned instruction.
    auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
    if (!NewRetVal)
      continue;

    // The RetVal might have been simplified during the inlining
    // process, which can make propagation incorrect.
    if (InlinedFunctionInfo.isSimplified(RetVal, NewRetVal))
      continue;
    // Backward propagation of attributes to the returned value may be
    // incorrect if it is control flow dependent.
    // Consider:
    //   @callee {
    //     %rv = call @foo()
    //     %rv2 = call @bar()
    //     if (%rv2 != null)
    //       return %rv2
    //     if (%rv == null)
    //       exit()
    //     return %rv
    //   }
    //   caller() {
    //     %val = call nonnull @callee()
    //   }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // limit the check to RetVal and RI being in the same basic block, with no
    // throwing/exiting instructions between these instructions.
    if (RI->getParent() != RetVal->getParent() ||
        MayContainThrowingOrExitingCallAfterCB(RetVal, RI))
      continue;
    // Add to the existing attributes of NewRetVal, i.e. the cloned call
    // instruction.
    // NB! When we have the same attribute already existing on NewRetVal, but
    // with a differing value, the AttributeList's merge API honours the already
    // existing attribute value (i.e. attributes such as dereferenceable,
    // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
    AttributeList AL = NewRetVal->getAttributes();
    if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
      ValidUB.removeAttribute(Attribute::Dereferenceable);
    if (ValidUB.getDereferenceableOrNullBytes() <
        AL.getRetDereferenceableOrNullBytes())
      ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
    AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
    // Attributes that may generate poison returns are a bit tricky. If we
    // propagate them, other uses of the callsite might have their behavior
    // change or cause UB (if they have noundef) because of the new potential
    // poison.
    // Take the following three cases:
    //
    // 1)
    // define nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 2)
    // define noundef nonnull ptr @foo() {
    //   %p = call ptr @bar()
    //   call void @use(ptr %p) willreturn nounwind
    //   ret ptr %p
    // }
    //
    // 3)
    // define nonnull ptr @foo() {
    //   %p = call noundef ptr @bar()
    //   ret ptr %p
    // }
    //
    // In case 1, we can't propagate nonnull because a poison value in @use may
    // change behavior or trigger UB.
    // In case 2, we don't need to be concerned about propagating nonnull, as
    // any new poison at @use will trigger UB anyways.
    // In case 3, we can never propagate nonnull because it may create UB due to
    // the noundef on @bar.
    if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
      ValidPG.removeAttribute(Attribute::Alignment);
    if (ValidPG.hasAttributes()) {
      Attribute CBRange = ValidPG.getAttribute(Attribute::Range);
      if (CBRange.isValid()) {
        Attribute NewRange = AL.getRetAttr(Attribute::Range);
        if (NewRange.isValid()) {
          ValidPG.addRangeAttr(
              CBRange.getRange().intersectWith(NewRange.getRange()));
        }
      }
      // Three checks.
      // If the callsite has `noundef`, then a poison due to violating the
      // return attribute will create UB anyways so we can always propagate.
      // Otherwise, if the return value (callee to be inlined) has `noundef`, we
      // can't propagate as a new poison return will cause UB.
      // Finally, check if the return value has no uses whose behavior may
      // change/may cause UB if we potentially return poison. At the moment this
      // is implemented overly conservatively with a single-use check.
      // TODO: Update the single-use check to iterate through uses and only bail
      // if we have a potentially dangerous use.

      if (CB.hasRetAttr(Attribute::NoUndef) ||
          (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
        NewAL = NewAL.addRetAttributes(Context, ValidPG);
    }
    NewRetVal->setAttributes(NewAL);
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
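/// As a hedged illustration (names invented for this sketch): inlining
///   define void @callee(ptr align 16 %p) { ... }
/// at a call site whose argument's alignment is not already provable may emit
///   call void @llvm.assume(i1 true) [ "align"(ptr %arg, i64 16) ]
/// into the caller, preserving the align-16 guarantee for later passes.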
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
  auto &DL = CB.getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CB.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
        Arg.hasNUses(0))
      continue;
    MaybeAlign Alignment = Arg.getParamAlign();
    if (!Alignment)
      continue;

    if (!DTCalculated) {
      DT.recalculate(*CB.getCaller());
      DTCalculated = true;
    }
    // If we can already prove the asserted alignment in the context of the
    // caller, then don't bother inserting the assumption.
    Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
    if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
      continue;

    CallInst *NewAsmp = IRBuilder<>(&CB).CreateAlignmentAssumption(
        DL, ArgVal, Alignment->value());
    AC->registerAssumption(cast<AssumeInst>(NewAsmp));
  }
}

static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
                                    Module *M, BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI,
                                    Function *CalledFunc) {
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size =
      Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
                                      /*SrcAlign*/ Align(1), Size);

  // The verifier requires that all calls of debug-info-bearing functions
  // from debug-info-bearing functions have a debug location (for inlining
  // purposes). Assign a dummy location to satisfy the constraint.
  if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
    if (DISubprogram *SP = CalledFunc->getSubprogram())
      CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
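/// Illustrative sketch (hypothetical IR, not from a test): for a call site
///   call void @callee(ptr byval(%struct.S) %p)
/// inlining conceptually introduces
///   %p.copy = alloca %struct.S
///   call void @llvm.memcpy.p0.p0.i64(ptr %p.copy, ptr %p, i64 <size>, i1 false)
/// with uses of the parameter rewritten to %p.copy, unless the copy can be
/// elided as described in the function body below.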
static Value *HandleByValArgument(Type *ByValType, Value *Arg,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  MaybeAlign ByValAlignment) {
  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment.valueOrOne() == 1)
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
        *ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  Align Alignment = DL.getPrefTypeAlign(ByValType);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  if (ByValAlignment)
    Alignment = std::max(Alignment, *ByValAlignment);

  AllocaInst *NewAlloca =
      new AllocaInst(ByValType, Arg->getType()->getPointerAddressSpace(),
                     nullptr, Alignment, Arg->getName());
  NewAlloca->insertBefore(Caller->begin()->begin());
  IFI.StaticAllocas.push_back(NewAlloca);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users())
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
      if (II->isLifetimeStartOrEnd())
        return true;
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy =
      PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy)
      continue;
    if (U->stripPointerCasts() != AI)
      continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
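/// For example (a hedged sketch of the intent, metadata names invented): a
/// callee instruction at line 10 inlined at a call site at line 42 ends up
/// with a location roughly like
///   !loc = !DILocation(line: 10, scope: !calleeScope, inlinedAt: !callsite)
///   !callsite = !DILocation(line: 42, scope: !callerScope)
/// so debuggers and profilers can attribute the instruction to both frames.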
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &IANodes) {
  auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
  return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
                         OrigDL.getScope(), IA);
}

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  // Check if we are not generating inline line tables and want to use
  // the call site location instead.
  bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");

  // Helper-util for updating the metadata attached to an instruction.
  auto UpdateInst = [&](Instruction &I) {
    // Loop metadata needs to be updated so that the start and end locs
    // reference inlined-at locations.
    auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
                              &IANodes](Metadata *MD) -> Metadata * {
      if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
        return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
      return MD;
    };
    updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);

    if (!NoInlineLineTables)
      if (DebugLoc DL = I.getDebugLoc()) {
        DebugLoc IDL =
            inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
        I.setDebugLoc(IDL);
        return;
      }

    if (CalleeHasDebugInfo && !NoInlineLineTables)
      return;

    // If the inlined instruction has no line number, or if inline info
    // is not being generated, make it look as if it originates from the call
    // location. This is important for ((__always_inline, __nodebug__))
    // functions which must use caller location for all instructions in their
    // function body.

    // Don't update static allocas, as they may get moved later.
    if (auto *AI = dyn_cast<AllocaInst>(&I))
      if (allocaWouldBeStaticInEntry(AI))
        return;

    // Do not force a debug loc for pseudo probes, since they do not need to
    // be debuggable, and also they are expected to have a zero/null dwarf
    // discriminator at this point which could be violated otherwise.
    if (isa<PseudoProbeInst>(I))
      return;

    I.setDebugLoc(TheCallDL);
  };

  // Helper-util for updating debug-info records attached to instructions.
  auto UpdateDVR = [&](DbgRecord *DVR) {
    assert(DVR->getDebugLoc() && "Debug Value must have debug loc");
    if (NoInlineLineTables) {
      DVR->setDebugLoc(TheCallDL);
      return;
    }
    DebugLoc DL = DVR->getDebugLoc();
    DebugLoc IDL =
        inlineDebugLoc(DL, InlinedAtNode,
                       DVR->getMarker()->getParent()->getContext(), IANodes);
    DVR->setDebugLoc(IDL);
  };

  // Iterate over all instructions, updating metadata and debug-info records.
  for (; FI != Fn->end(); ++FI) {
    for (Instruction &I : *FI) {
      UpdateInst(I);
      for (DbgRecord &DVR : I.getDbgRecordRange()) {
        UpdateDVR(&DVR);
      }
    }

    // Remove debug info intrinsics if we're not keeping inline info.
    if (NoInlineLineTables) {
      BasicBlock::iterator BI = FI->begin();
      while (BI != FI->end()) {
        if (isa<DbgInfoIntrinsic>(BI)) {
          BI = BI->eraseFromParent();
          continue;
        } else {
          BI->dropDbgRecords();
        }
        ++BI;
      }
    }
  }
}

#undef DEBUG_TYPE
#define DEBUG_TYPE "assignment-tracking"
/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL,
                                                 const CallBase &CB) {
  at::StorageToVarsMap EscapedLocals;
  SmallPtrSet<const Value *, 4> SeenBases;

  LLVM_DEBUG(
      errs() << "# Finding caller local variables escaped by callee\n");
  for (const Value *Arg : CB.args()) {
    LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
    if (!Arg->getType()->isPointerTy()) {
      LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
      continue;
    }

    const Instruction *I = dyn_cast<Instruction>(Arg);
    if (!I) {
      LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
      continue;
    }

    // Walk back to the base storage.
    assert(Arg->getType()->isPtrOrPtrVectorTy());
    APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
    const AllocaInst *Base = dyn_cast<AllocaInst>(
        Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
    if (!Base) {
      LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
      continue;
    }

    assert(Base);
    LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
    // We only need to process each base address once - skip any duplicates.
    if (!SeenBases.insert(Base).second)
      continue;

    // Find all local variables associated with the backing storage.
    auto CollectAssignsForStorage = [&](auto *DbgAssign) {
      // Skip variables from inlined functions - they are not local variables.
      if (DbgAssign->getDebugLoc().getInlinedAt())
        return;
      LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
      EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
    };
    for_each(at::getAssignmentMarkers(Base), CollectAssignsForStorage);
    for_each(at::getDVRAssignmentMarkers(Base), CollectAssignsForStorage);
  }
  return EscapedLocals;
}

static void trackInlinedStores(Function::iterator Start, Function::iterator End,
                               const CallBase &CB) {
  LLVM_DEBUG(errs() << "trackInlinedStores into "
                    << Start->getParent()->getName() << " from "
                    << CB.getCalledFunction()->getName() << "\n");
  const DataLayout &DL = CB.getDataLayout();
  at::trackAssignments(Start, End, collectEscapedLocals(DL, CB), DL);
}

/// Update inlined instructions' DIAssignID metadata. We need to do this
/// otherwise a function inlined more than once into the same function
/// will cause DIAssignID to be shared by many instructions.
static void fixupAssignments(Function::iterator Start, Function::iterator End) {
  DenseMap<DIAssignID *, DIAssignID *> Map;
  // Loop over all the inlined instructions. If we find a DIAssignID
  // attachment or use, replace it with a new version.
  for (auto BBI = Start; BBI != End; ++BBI) {
    for (Instruction &I : *BBI)
      at::remapAssignID(Map, I);
  }
}
#undef DEBUG_TYPE
#define DEBUG_TYPE "inline-function"

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// the callee's entry block gets the same frequency as the callsite block and
/// the relative frequencies of all cloned blocks remain the same after cloning.
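/// As a worked example (numbers invented for illustration): if the callsite
/// block has frequency 200 and the callee's entry block has frequency 100,
/// every cloned block is scaled by 200/100 = 2, so a callee block with
/// frequency 30 becomes a cloned block with frequency 60 in the caller.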
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that
      // happens, we want to use the maximum among the original blocks'
      // frequencies.
      BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
}

/// Update the branch metadata for cloned call instructions.
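/// For instance (an illustrative sketch): if the callee's entry count is 1000
/// and this callsite is estimated to contribute 300 of those entries, the
/// callee's remaining entry count drops by min(300, 1000) = 300, and the
/// branch weights of the cloned calls are scaled accordingly.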
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
                              const ProfileCount &CalleeEntryCount,
                              const CallBase &TheCall, ProfileSummaryInfo *PSI,
                              BlockFrequencyInfo *CallerBFI) {
  if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
    return;
  auto CallSiteCount =
      PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
  int64_t CallCount =
      std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
  updateProfileCallee(Callee, -CallCount, &VMap);
}

void llvm::updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap) {
  auto CalleeCount = Callee->getEntryCount();
  if (!CalleeCount)
    return;

  const uint64_t PriorEntryCount = CalleeCount->getCount();

  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count; guard against underflow by clamping the new count to 0.
  const uint64_t NewEntryCount =
      (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
          ? 0
          : PriorEntryCount + EntryDelta;

  auto updateVTableProfWeight = [](CallBase *CB, const uint64_t NewEntryCount,
                                   const uint64_t PriorEntryCount) {
    Instruction *VPtr = PGOIndirectCallVisitor::tryGetVTableInstruction(CB);
    if (VPtr)
      scaleProfData(*VPtr, NewEntryCount, PriorEntryCount);
  };

  // During inlining?
  if (VMap) {
    uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
    for (auto Entry : *VMap) {
      if (isa<CallInst>(Entry.first))
        if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second)) {
          CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
          updateVTableProfWeight(CI, CloneEntryCount, PriorEntryCount);
        }

      if (isa<InvokeInst>(Entry.first))
        if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second)) {
          II->updateProfWeight(CloneEntryCount, PriorEntryCount);
          updateVTableProfWeight(II, CloneEntryCount, PriorEntryCount);
        }
    }
  }

  if (EntryDelta) {
    Callee->setEntryCount(NewEntryCount);

    for (BasicBlock &BB : *Callee)
      // No need to update the callsite if it is pruned during inlining.
      if (!VMap || VMap->count(&BB))
        for (Instruction &I : BB) {
          if (CallInst *CI = dyn_cast<CallInst>(&I)) {
            CI->updateProfWeight(NewEntryCount, PriorEntryCount);
            updateVTableProfWeight(CI, NewEntryCount, PriorEntryCount);
          }
          if (InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
            II->updateProfWeight(NewEntryCount, PriorEntryCount);
            updateVTableProfWeight(II, NewEntryCount, PriorEntryCount);
          }
        }
  }
}

/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
/// result is implicitly consumed by a call to retainRV or claimRV immediately
/// after the call. This function inlines the retainRV/claimRV calls.
///
/// There are three cases to consider:
///
/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
///    object in the callee return block, the autoreleaseRV call and the
///    retainRV/claimRV call in the caller cancel out. If the call in the
///    caller is a claimRV call, a call to objc_release is emitted.
///
/// 2. If there is a call in the callee return block that doesn't have operand
///    bundle "clang.arc.attachedcall", the operand bundle on the original call
///    is transferred to the call in the callee.
///
/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller
///    is a retainRV call.
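/// As a hedged sketch of case 1 (IR invented for illustration): before
/// inlining, the callee's return block contains
///   call ptr @llvm.objc.autoreleaseReturnValue(ptr %obj)
///   ret ptr %obj
/// and the caller's call carries the "clang.arc.attachedcall" bundle for
/// retainRV; after inlining, the autoreleaseRV/retainRV pair cancels out and
/// the autoreleaseRV call is simply erased.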
static void
inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind,
                           const SmallVectorImpl<ReturnInst *> &Returns) {
  assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
  bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
       IsUnsafeClaimRV = !IsRetainRV;

  for (auto *RI : Returns) {
    Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
    bool InsertRetainCall = IsRetainRV;
    IRBuilder<> Builder(RI->getContext());

    // Walk backwards through the basic block looking for either a matching
    // autoreleaseRV call or an unannotated call.
    auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
                                      RI->getParent()->rend());
    for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
      // Ignore casts.
      if (isa<CastInst>(I))
        continue;

      if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
            !II->hasNUses(0) ||
            objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
          break;

        // If we've found a matching autoreleaseRV call:
        // - If claimRV is attached to the call, insert a call to objc_release
        //   and erase the autoreleaseRV call.
        // - If retainRV is attached to the call, just erase the autoreleaseRV
        //   call.
        if (IsUnsafeClaimRV) {
          Builder.SetInsertPoint(II);
          Builder.CreateIntrinsic(Intrinsic::objc_release, {}, RetOpnd);
        }
        II->eraseFromParent();
        InsertRetainCall = false;
        break;
      }

      auto *CI = dyn_cast<CallInst>(&I);

      if (!CI)
        break;

      if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
          objcarc::hasAttachedCallOpBundle(CI))
        break;

      // If we've found an unannotated call that defines RetOpnd, add a
      // "clang.arc.attachedcall" operand bundle.
      Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
      OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
      auto *NewCall = CallBase::addOperandBundle(
          CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI->getIterator());
      NewCall->copyMetadata(*CI);
      CI->replaceAllUsesWith(NewCall);
      CI->eraseFromParent();
      InsertRetainCall = false;
      break;
    }

    if (InsertRetainCall) {
      // The retainRV is attached to the call and we've failed to find a
      // matching autoreleaseRV or an annotated call in the callee. Emit a call
      // to objc_retain.
      Builder.SetInsertPoint(RI);
      Builder.CreateIntrinsic(Intrinsic::objc_retain, {}, RetOpnd);
    }
  }
}

// In contextual profiling, when an inline succeeds, we want to remap the
// indices of the callee into the index space of the caller. We can't just
// leave them as-is because the same callee may appear in other places in this
// caller (other callsites), and its (the callee's) counters and sub-contextual
// profile tree would be potentially different.
// Not all BBs of the callee may survive the opportunistic DCE InlineFunction
// does (same goes for callsites in the callee).
// We will return a pair of vectors, one for basic block IDs and one for
// callsites. For such a vector V, V[Idx] will be -1 if the callee
// instrumentation with index Idx did not survive inlining, and a new value
// otherwise.
// This function will update the caller's instrumentation intrinsics
// accordingly, mapping indices as described above. We also replace the "name"
// operand because we use it to distinguish between "own" instrumentation and
// "from callee" instrumentation when performing the traversal of the CFG of
// the caller. We traverse depth-first from the callsite's BB and up to the
// point we hit BBs owned by the caller.
// The return values will then be used to update the contextual
// profile. Note: we only update the "name" and "index" operands in the
// instrumentation intrinsics; we leave the hash and total nr of indices as-is,
// it's not worth updating those.
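// For example (values invented for illustration): with 4 callee counters and a
// caller that already owns 6 counters, the returned counter map could be
// {-1, 6, 7, -1}: the callee's entry counter 0 was deleted (merged with the
// callsite BB's counter), counters 1 and 2 were reassigned caller indices 6
// and 7, and counter 3 did not survive inlining.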
static const std::pair<std::vector<int64_t>, std::vector<int64_t>>
remapIndices(Function &Caller, BasicBlock *StartBB,
             PGOContextualProfile &CtxProf, uint32_t CalleeCounters,
             uint32_t CalleeCallsites) {
  // We'll allocate a new ID to imported callsite counters and callsites. We're
  // using -1 to indicate a counter we delete. Most likely the entry ID, for
  // example, will be deleted - we don't want 2 IDs in the same BB, and the
  // entry would have been cloned in the callsite's old BB.
  std::vector<int64_t> CalleeCounterMap;
  std::vector<int64_t> CalleeCallsiteMap;
  CalleeCounterMap.resize(CalleeCounters, -1);
  CalleeCallsiteMap.resize(CalleeCallsites, -1);

  auto RewriteInstrIfNeeded = [&](InstrProfIncrementInst &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCounterMap[OldID] == -1)
      CalleeCounterMap[OldID] = CtxProf.allocateNextCounterIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCounterMap[OldID]);

    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  auto RewriteCallsiteInsIfNeeded = [&](InstrProfCallsite &Ins) -> bool {
    if (Ins.getNameValue() == &Caller)
      return false;
    const auto OldID = static_cast<uint32_t>(Ins.getIndex()->getZExtValue());
    if (CalleeCallsiteMap[OldID] == -1)
      CalleeCallsiteMap[OldID] = CtxProf.allocateNextCallsiteIndex(Caller);
    const auto NewID = static_cast<uint32_t>(CalleeCallsiteMap[OldID]);

    Ins.setNameValue(&Caller);
    Ins.setIndex(NewID);
    return true;
  };

  std::deque<BasicBlock *> Worklist;
  DenseSet<const BasicBlock *> Seen;
  // We will traverse the BBs starting from the callsite BB. The callsite BB
  // will have at least a BB ID - maybe its own, and in any case the one coming
  // from the cloned function's entry BB. The other BBs we'll start seeing from
  // there on may or may not have BB IDs. BBs with IDs belonging to our caller
  // are definitely not coming from the imported function and form a boundary
  // past which we don't need to traverse anymore. BBs may have no
  // instrumentation (because we originally inserted instrumentation as per
  // MST), in which case we'll traverse past them. An invariant we'll keep is
  // that a BB will have at most 1 BB ID. For example, in the callsite BB, we
  // will delete the callee BB's instrumentation. This doesn't result in
  // information loss: the entry BB of the callee will have the same count as
  // the callsite's BB. At the end of this traversal, all the callee's
  // instrumentation would be mapped into the caller's instrumentation index
  // space. Some of the callee's counters may be deleted (as mentioned, this
  // should result in no loss of information).
  Worklist.push_back(StartBB);
  while (!Worklist.empty()) {
    auto *BB = Worklist.front();
    Worklist.pop_front();
    bool Changed = false;
    auto *BBID = CtxProfAnalysis::getBBInstrumentation(*BB);
    if (BBID) {
      Changed |= RewriteInstrIfNeeded(*BBID);
      // This may be the entry block from the inlined callee, coming into a BB
      // that didn't have instrumentation because of MST decisions. Let's make
      // sure it's placed accordingly. This is a noop elsewhere.
      BBID->moveBefore(BB->getFirstInsertionPt());
    }
    for (auto &I : llvm::make_early_inc_range(*BB)) {
      if (auto *Inc = dyn_cast<InstrProfIncrementInst>(&I)) {
        if (isa<InstrProfIncrementInstStep>(Inc)) {
          // Step instrumentation is used for select instructions. Inlining may
          // have propagated a constant resulting in the condition of the
          // select being resolved, in which case function cloning resolves the
          // value of the select, and elides the select instruction. If that is
          // the case, the step parameter of the instrumentation will reflect
          // that. We can delete the instrumentation in that case.
          if (isa<Constant>(Inc->getStep())) {
            assert(!Inc->getNextNode() || !isa<SelectInst>(Inc->getNextNode()));
            Inc->eraseFromParent();
          } else {
            assert(isa_and_nonnull<SelectInst>(Inc->getNextNode()));
            RewriteInstrIfNeeded(*Inc);
          }
        } else if (Inc != BBID) {
          // If we're here it means that the BB had more than 1 ID, presumably
          // some coming from the callee. We "made up our mind" to keep the
          // first one (which may or may not have been originally the
          // caller's). All the others are superfluous and we delete them.
          Inc->eraseFromParent();
          Changed = true;
        }
      } else if (auto *CS = dyn_cast<InstrProfCallsite>(&I)) {
        Changed |= RewriteCallsiteInsIfNeeded(*CS);
      }
    }
    if (!BBID || Changed)
      for (auto *Succ : successors(BB))
        if (Seen.insert(Succ).second)
          Worklist.push_back(Succ);
  }

  assert(
      llvm::all_of(CalleeCounterMap, [&](const auto &V) { return V != 0; }) &&
      "Counter index mapping should be either to -1 or to non-zero index, "
      "because the 0 "
      "index corresponds to the entry BB of the caller");
  assert(
      llvm::all_of(CalleeCallsiteMap, [&](const auto &V) { return V != 0; }) &&
      "Callsite index mapping should be either to -1 or to non-zero index, "
      "because there should have been at least a callsite - the inlined one "
      "- which would have had a 0 index.");

  return {std::move(CalleeCounterMap), std::move(CalleeCallsiteMap)};
}

// Inline. If successful, update the contextual profile (if a valid one is
// given).
// The contextual profile data is organized in trees, as follows:
//  - each node corresponds to a function
//  - the root of each tree corresponds to an "entrypoint" - e.g.
//    RPC handler for server side
//  - the path from the root to a node is a particular call path
//  - the counters stored in a node are counter values observed in that
//    particular call path ("context")
//  - the edges between nodes are annotated with callsite IDs.
//
// Updating the contextual profile after an inlining means, at a high level,
// copying over the data of the callee, **intentionally without any value
// scaling**, and copying over the callees of the inlined callee.
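// As a sketch of the shape of this data (names invented for illustration): a
// tree rooted at @rpc_handler may have a child reached via callsite ID 2 for
// @helper; after @helper is inlined at that callsite, @helper's counters are
// copied into @rpc_handler's counter vector (unscaled), @helper's sub-contexts
// become @rpc_handler's, and the edge for callsite ID 2 is removed.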
2356llvm::InlineResultllvm::InlineFunction(CallBase &CB,InlineFunctionInfo &IFI,
2357PGOContextualProfile &CtxProf,
2358bool MergeAttributes,
2359AAResults *CalleeAAR,
2360bool InsertLifetime,
2361Function *ForwardVarArgsTo) {
2362if (!CtxProf)
2363returnInlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2364 ForwardVarArgsTo);
2365
2366auto &Caller = *CB.getCaller();
2367auto &Callee = *CB.getCalledFunction();
2368auto *StartBB = CB.getParent();
2369
2370// Get some preliminary data about the callsite before it might get inlined.
2371// Inlining shouldn't delete the callee, but it's cleaner (and low-cost) to
2372// get this data upfront and rely less on InlineFunction's behavior.
2373constauto CalleeGUID =AssignGUIDPass::getGUID(Callee);
2374auto *CallsiteIDIns =CtxProfAnalysis::getCallsiteInstrumentation(CB);
2375constauto CallsiteID =
2376static_cast<uint32_t>(CallsiteIDIns->getIndex()->getZExtValue());
2377
2378constauto NumCalleeCounters = CtxProf.getNumCounters(Callee);
2379constauto NumCalleeCallsites = CtxProf.getNumCallsites(Callee);
2380
2381auto Ret =InlineFunction(CB, IFI, MergeAttributes, CalleeAAR, InsertLifetime,
2382 ForwardVarArgsTo);
2383if (!Ret.isSuccess())
2384return Ret;
2385
2386// Inlining succeeded, we don't need the instrumentation of the inlined
2387// callsite.
2388 CallsiteIDIns->eraseFromParent();
2389
2390// Assinging Maps and then capturing references into it in the lambda because
2391// captured structured bindings are a C++20 extension. We do also need a
2392// capture here, though.
2393constauto IndicesMaps =remapIndices(Caller, StartBB, CtxProf,
2394 NumCalleeCounters, NumCalleeCallsites);
2395constuint32_t NewCountersSize = CtxProf.getNumCounters(Caller);
2396
2397auto Updater = [&](PGOCtxProfContext &Ctx) {
2398assert(Ctx.guid() ==AssignGUIDPass::getGUID(Caller));
2399constauto &[CalleeCounterMap, CalleeCallsiteMap] = IndicesMaps;
2400assert(
2401 (Ctx.counters().size() +
2402llvm::count_if(CalleeCounterMap, [](auto V) { return V != -1; }) ==
2403 NewCountersSize) &&
2404"The caller's counters size should have grown by the number of new "
2405"distinct counters inherited from the inlined callee.");
2406 Ctx.resizeCounters(NewCountersSize);
2407// If the callsite wasn't exercised in this context, the value of the
2408// counters coming from it is 0 - which it is right now, after resizing them
2409// - and so we're done.
2410auto CSIt = Ctx.callsites().find(CallsiteID);
2411if (CSIt == Ctx.callsites().end())
2412return;
2413auto CalleeCtxIt = CSIt->second.find(CalleeGUID);
2414// The callsite was exercised, but not with this callee (so presumably this
2415// is an indirect callsite). Again, we're done here.
2416if (CalleeCtxIt == CSIt->second.end())
2417return;
2418
2419// Let's pull in the counter values and the subcontexts coming from the
2420// inlined callee.
2421auto &CalleeCtx = CalleeCtxIt->second;
2422assert(CalleeCtx.guid() == CalleeGUID);
2423
2424for (autoI = 0U;I < CalleeCtx.counters().size(); ++I) {
2425const int64_t NewIndex = CalleeCounterMap[I];
2426if (NewIndex >= 0) {
2427assert(NewIndex != 0 &&"counter index mapping shouldn't happen to a 0 "
2428"index, that's the caller's entry BB");
2429 Ctx.counters()[NewIndex] = CalleeCtx.counters()[I];
2430 }
2431 }
2432for (auto &[I, OtherSet] : CalleeCtx.callsites()) {
2433const int64_t NewCSIdx = CalleeCallsiteMap[I];
2434if (NewCSIdx >= 0) {
2435assert(NewCSIdx != 0 &&
2436"callsite index mapping shouldn't happen to a 0 index, the "
2437"caller must've had at least one callsite (with such an index)");
2438 Ctx.ingestAllContexts(NewCSIdx, std::move(OtherSet));
2439 }
2440 }
2441// We know the traversal is preorder, so it wouldn't have yet looked at the
2442// sub-contexts of this context that it's currently visiting. Meaning, the
2443// erase below invalidates no iterators.
2444autoDeleted = Ctx.callsites().erase(CallsiteID);
2445assert(Deleted);
2446 (void)Deleted;
2447 };
2448 CtxProf.update(Updater, Caller);
2449return Ret;
2450}
2451
2452/// This function inlines the called function into the basic block of the
2453/// caller. This returns false if it is not possible to inline this call.
2454/// The program is still in a well defined state if this occurs though.
2455///
2456/// Note that this only does one level of inlining. For example, if the
2457/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2458/// exists in the instruction stream. Similarly this will inline a recursive
2459/// function by one level.
2460llvm::InlineResultllvm::InlineFunction(CallBase &CB,InlineFunctionInfo &IFI,
2461bool MergeAttributes,
2462AAResults *CalleeAAR,
2463bool InsertLifetime,
2464Function *ForwardVarArgsTo) {
2465assert(CB.getParent() && CB.getFunction() &&"Instruction not in function!");
2466
2467// FIXME: we don't inline callbr yet.
2468if (isa<CallBrInst>(CB))
2469returnInlineResult::failure("We don't inline callbr yet.");
2470
2471// If IFI has any state in it, zap it before we fill it in.
2472 IFI.reset();
2473
2474Function *CalledFunc = CB.getCalledFunction();
2475if (!CalledFunc ||// Can't inline external function or indirect
2476 CalledFunc->isDeclaration())// call!
2477returnInlineResult::failure("external or indirect");
2478
2479// The inliner does not know how to inline through calls with operand bundles
2480// in general ...
2481Value *ConvergenceControlToken =nullptr;
2482if (CB.hasOperandBundles()) {
2483for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2484auto OBUse = CB.getOperandBundleAt(i);
2485uint32_t Tag = OBUse.getTagID();
2486// ... but it knows how to inline through "deopt" operand bundles ...
2487if (Tag ==LLVMContext::OB_deopt)
2488continue;
2489// ... and "funclet" operand bundles.
2490if (Tag ==LLVMContext::OB_funclet)
2491continue;
2492if (Tag ==LLVMContext::OB_clang_arc_attachedcall)
2493continue;
2494if (Tag ==LLVMContext::OB_kcfi)
2495continue;
2496if (Tag ==LLVMContext::OB_convergencectrl) {
2497 ConvergenceControlToken = OBUse.Inputs[0].get();
2498continue;
2499 }
2500
2501returnInlineResult::failure("unsupported operand bundle");
2502 }
2503 }
2504
2505// FIXME: The check below is redundant and incomplete. According to spec, if a
2506// convergent call is missing a token, then the caller is using uncontrolled
2507// convergence. If the callee has an entry intrinsic, then the callee is using
2508// controlled convergence, and the call cannot be inlined. A proper
2509// implemenation of this check requires a whole new analysis that identifies
2510// convergence in every function. For now, we skip that and just do this one
2511// cursory check. The underlying assumption is that in a compiler flow that
2512// fully implements convergence control tokens, there is no mixing of
2513// controlled and uncontrolled convergent operations in the whole program.
2514if (CB.isConvergent()) {
2515if (!ConvergenceControlToken &&
2516getConvergenceEntry(CalledFunc->getEntryBlock())) {
2517returnInlineResult::failure(
2518"convergent call needs convergencectrl operand");
2519 }
2520 }
2521
2522// If the call to the callee cannot throw, set the 'nounwind' flag on any
2523// calls that we inline.
2524bool MarkNoUnwind = CB.doesNotThrow();
2525
2526BasicBlock *OrigBB = CB.getParent();
2527Function *Caller = OrigBB->getParent();
2528
2529// GC poses two hazards to inlining, which only occur when the callee has GC:
2530// 1. If the caller has no GC, then the callee's GC must be propagated to the
2531// caller.
2532// 2. If the caller has a differing GC, it is invalid to inline.
2533if (CalledFunc->hasGC()) {
2534if (!Caller->hasGC())
2535 Caller->setGC(CalledFunc->getGC());
2536elseif (CalledFunc->getGC() != Caller->getGC())
2537returnInlineResult::failure("incompatible GC");
2538 }
2539
2540// Get the personality function from the callee if it contains a landing pad.
2541Constant *CalledPersonality =
2542 CalledFunc->hasPersonalityFn()
2543 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2544 :nullptr;
2545
2546// Find the personality function used by the landing pads of the caller. If it
2547// exists, then check to see that it matches the personality function used in
2548// the callee.
2549Constant *CallerPersonality =
2550 Caller->hasPersonalityFn()
2551 ? Caller->getPersonalityFn()->stripPointerCasts()
2552 :nullptr;
2553if (CalledPersonality) {
2554if (!CallerPersonality)
2555 Caller->setPersonalityFn(CalledPersonality);
2556// If the personality functions match, then we can perform the
2557// inlining. Otherwise, we can't inline.
2558// TODO: This isn't 100% true. Some personality functions are proper
2559// supersets of others and can be used in place of the other.
2560elseif (CalledPersonality != CallerPersonality)
2561returnInlineResult::failure("incompatible personality");
2562 }
2563
2564// We need to figure out which funclet the callsite was in so that we may
2565// properly nest the callee.
2566Instruction *CallSiteEHPad =nullptr;
2567if (CallerPersonality) {
2568EHPersonality Personality =classifyEHPersonality(CallerPersonality);
2569if (isScopedEHPersonality(Personality)) {
2570 std::optional<OperandBundleUse> ParentFunclet =
2571 CB.getOperandBundle(LLVMContext::OB_funclet);
2572if (ParentFunclet)
2573 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2574
2575// OK, the inlining site is legal. What about the target function?
2576
2577if (CallSiteEHPad) {
2578if (Personality == EHPersonality::MSVC_CXX) {
2579// The MSVC personality cannot tolerate catches getting inlined into
2580// cleanup funclets.
2581if (isa<CleanupPadInst>(CallSiteEHPad)) {
2582// Ok, the call site is within a cleanuppad. Let's check the callee
2583// for catchpads.
2584for (constBasicBlock &CalledBB : *CalledFunc) {
2585if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHIIt()))
2586returnInlineResult::failure("catch in cleanup funclet");
2587 }
2588 }
2589 }elseif (isAsynchronousEHPersonality(Personality)) {
2590// SEH is even less tolerant, there may not be any sort of exceptional
2591// funclet in the callee.
2592for (constBasicBlock &CalledBB : *CalledFunc) {
2593if (CalledBB.isEHPad())
2594returnInlineResult::failure("SEH in cleanup funclet");
2595 }
2596 }
2597 }
2598 }
2599 }
2600
2601// Determine if we are dealing with a call in an EHPad which does not unwind
2602// to caller.
2603bool EHPadForCallUnwindsLocally =false;
2604if (CallSiteEHPad && isa<CallInst>(CB)) {
2605UnwindDestMemoTy FuncletUnwindMap;
2606Value *CallSiteUnwindDestToken =
2607getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
2608
2609 EHPadForCallUnwindsLocally =
2610 CallSiteUnwindDestToken &&
2611 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2612 }
2613
2614// Get an iterator to the last basic block in the function, which will have
2615// the new function inlined after it.
2616Function::iterator LastBlock = --Caller->end();
2617
2618// Make sure to capture all of the return instructions from the cloned
2619// function.
2620SmallVector<ReturnInst*, 8> Returns;
2621ClonedCodeInfo InlinedFunctionInfo;
2622Function::iterator FirstNewBlock;
2623
2624 {// Scope to destroy VMap after cloning.
2625ValueToValueMapTy VMap;
2626structByValInit {
2627Value *Dst;
2628Value *Src;
2629Type *Ty;
2630 };
2631// Keep a list of pair (dst, src) to emit byval initializations.
2632SmallVector<ByValInit, 4> ByValInits;
2633
2634// When inlining a function that contains noalias scope metadata,
2635// this metadata needs to be cloned so that the inlined blocks
2636// have different "unique scopes" at every call site.
2637// Track the metadata that must be cloned. Do this before other changes to
2638// the function, so that we do not get in trouble when inlining caller ==
2639// callee.
2640 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2641
2642auto &DL = Caller->getDataLayout();
2643
2644// Calculate the vector of arguments to pass into the function cloner, which
2645// matches up the formal to the actual argument values.
2646auto AI = CB.arg_begin();
2647unsigned ArgNo = 0;
2648for (Function::arg_iteratorI = CalledFunc->arg_begin(),
2649 E = CalledFunc->arg_end();I != E; ++I, ++AI, ++ArgNo) {
2650Value *ActualArg = *AI;
2651
2652// When byval arguments actually inlined, we need to make the copy implied
2653// by them explicit. However, we don't do this if the callee is readonly
2654// or readnone, because the copy would be unneeded: the callee doesn't
2655// modify the struct.
2656if (CB.isByValArgument(ArgNo)) {
2657 ActualArg =HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2658 &CB, CalledFunc, IFI,
2659 CalledFunc->getParamAlign(ArgNo));
2660if (ActualArg != *AI)
2661 ByValInits.push_back(
2662 {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
2663 }
2664
2665 VMap[&*I] = ActualArg;
2666 }
2667
2668// TODO: Remove this when users have been updated to the assume bundles.
2669// Add alignment assumptions if necessary. We do this before the inlined
2670// instructions are actually cloned into the caller so that we can easily
2671// check what will be known at the start of the inlined code.
2672AddAlignmentAssumptions(CB, IFI);
2673
2674AssumptionCache *AC =
2675 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) :nullptr;
2676
2677 /// Preserve all attributes on of the call and its parameters.
2678salvageKnowledge(&CB, AC);
2679
2680// We want the inliner to prune the code as it copies. We would LOVE to
2681// have no dead or constant instructions leftover after inlining occurs
2682// (which can happen, e.g., because an argument was constant), but we'll be
2683// happy with whatever the cloner can do.
2684CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2685/*ModuleLevelChanges=*/false, Returns,".i",
2686 &InlinedFunctionInfo);
2687// Remember the first block that is newly cloned over.
2688 FirstNewBlock = LastBlock; ++FirstNewBlock;
2689
2690// Insert retainRV/clainRV runtime calls.
2691objcarc::ARCInstKind RVCallKind =objcarc::getAttachedARCFunctionKind(&CB);
2692if (RVCallKind != objcarc::ARCInstKind::None)
2693inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2694
2695// Updated caller/callee profiles only when requested. For sample loader
2696// inlining, the context-sensitive inlinee profile doesn't need to be
2697// subtracted from callee profile, and the inlined clone also doesn't need
2698// to be scaled based on call site count.
2699if (IFI.UpdateProfile) {
2700if (IFI.CallerBFI !=nullptr && IFI.CalleeBFI !=nullptr)
2701// Update the BFI of blocks cloned into the caller.
2702updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2703 CalledFunc->front());
2704
2705if (autoProfile = CalledFunc->getEntryCount())
2706updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2707 IFI.CallerBFI);
2708 }
2709
2710// Inject byval arguments initialization.
2711for (ByValInit &Init : ByValInits)
2712HandleByValArgumentInit(Init.Ty,Init.Dst,Init.Src, Caller->getParent(),
2713 &*FirstNewBlock, IFI, CalledFunc);
2714
2715 std::optional<OperandBundleUse> ParentDeopt =
2716 CB.getOperandBundle(LLVMContext::OB_deopt);
2717if (ParentDeopt) {
2718SmallVector<OperandBundleDef, 2> OpDefs;
2719
2720for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2721CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2722if (!ICS)
2723continue;// instruction was DCE'd or RAUW'ed to undef
2724
2725 OpDefs.clear();
2726
2727 OpDefs.reserve(ICS->getNumOperandBundles());
2728
2729for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2730 ++COBi) {
2731auto ChildOB = ICS->getOperandBundleAt(COBi);
2732if (ChildOB.getTagID() !=LLVMContext::OB_deopt) {
2733// If the inlined call has other operand bundles, let them be
2734 OpDefs.emplace_back(ChildOB);
2735continue;
2736 }
2737
2738// It may be useful to separate this logic (of handling operand
2739// bundles) out to a separate "policy" component if this gets crowded.
2740// Prepend the parent's deoptimization continuation to the newly
2741// inlined call's deoptimization continuation.
2742 std::vector<Value *> MergedDeoptArgs;
2743 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2744 ChildOB.Inputs.size());
2745
2746llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2747llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2748
2749 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2750 }
2751
2752Instruction *NewI =CallBase::Create(ICS, OpDefs, ICS->getIterator());
2753
2754// Note: the RAUW does the appropriate fixup in VMap, so we need to do
2755// this even if the call returns void.
2756 ICS->replaceAllUsesWith(NewI);
2757
2758 VH =nullptr;
2759 ICS->eraseFromParent();
2760 }
2761 }
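// Illustrative sketch of the merge above: if the parent call site carries
// [ "deopt"(i32 10) ] and an inlined call carries [ "deopt"(i32 20) ], the
// rewritten inlined call carries the concatenation
// [ "deopt"(i32 10, i32 20) ], keeping the parent's deoptimization
// continuation in front of the child's.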
2762
2763// For 'nodebug' functions, the associated DISubprogram is always null.
2764// Conservatively avoid propagating the callsite debug location to
2765// instructions inlined from a function whose DISubprogram is not null.
2766fixupLineNumbers(Caller, FirstNewBlock, &CB,
2767 CalledFunc->getSubprogram() !=nullptr);
2768
2769if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2770// Interpret inlined stores to caller-local variables as assignments.
2771trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2772
2773// Update DIAssignID metadata attachments and uses so that they are
2774// unique to this inlined instance.
2775fixupAssignments(FirstNewBlock, Caller->end());
2776 }
2777
2778// Now clone the inlined noalias scope metadata.
2779 SAMetadataCloner.clone();
2780 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2781
2782// Add noalias metadata if necessary.
2783AddAliasScopeMetadata(CB, VMap,DL, CalleeAAR, InlinedFunctionInfo);
2784
2785// Clone return attributes on the callsite into the calls within the inlined
2786// function which feed into its return value.
2787AddReturnAttributes(CB, VMap, InlinedFunctionInfo);
2788
2789// Clone attributes on the params of the callsite to calls within the
2790// inlined function which use the same param.
2791AddParamAndFnBasicAttributes(CB, VMap, InlinedFunctionInfo);
2792
2793propagateMemProfMetadata(CalledFunc, CB,
2794 InlinedFunctionInfo.ContainsMemProfMetadata, VMap);
2795
2796// Propagate metadata on the callsite if necessary.
2797PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2798
2799// Register any cloned assumptions.
2800if (IFI.GetAssumptionCache)
2801for (BasicBlock &NewBlock :
2802make_range(FirstNewBlock->getIterator(), Caller->end()))
2803for (Instruction &I : NewBlock)
2804if (auto *II = dyn_cast<AssumeInst>(&I))
2805 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2806 }
2807
2808if (ConvergenceControlToken) {
2809IntrinsicInst *IntrinsicCall =getConvergenceEntry(*FirstNewBlock);
2810if (IntrinsicCall) {
2811 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2812 IntrinsicCall->eraseFromParent();
2813 }
2814 }
2815
2816// If there are any alloca instructions in the block that used to be the entry
2817// block for the callee, move them to the entry block of the caller. First
2818// calculate which instruction they should be inserted before. We insert the
2819// instructions at the end of the current alloca list.
2820 {
2821BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2822for (BasicBlock::iteratorI = FirstNewBlock->begin(),
2823 E = FirstNewBlock->end();I != E; ) {
2824AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2825if (!AI)continue;
2826
2827// If the alloca is now dead, remove it. This often occurs due to code
2828// specialization.
2829if (AI->use_empty()) {
2830 AI->eraseFromParent();
2831continue;
2832 }
2833
2834if (!allocaWouldBeStaticInEntry(AI))
2835continue;
2836
2837// Keep track of the static allocas that we inline into the caller.
2838 IFI.StaticAllocas.push_back(AI);
2839
2840// Scan for the block of allocas that we can move over, and move them
2841// all at once.
2842while (isa<AllocaInst>(I) &&
2843 !cast<AllocaInst>(I)->use_empty() &&
2844allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2845 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2846 ++I;
2847 }
2848
2849// Transfer all of the allocas over in a block. Using splice means
2850// that the instructions aren't removed from the symbol table, then
2851// reinserted.
2852I.setTailBit(true);
2853 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2854 AI->getIterator(),I);
2855 }
2856 }
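// Illustrative sketch (hypothetical %buf): an alloca that was static in
// the callee's entry block,
//
//   %buf = alloca [16 x i8], align 8
//
// is spliced into the caller's entry block so it remains a static alloca,
// and thus stays eligible for SROA/mem2reg, after inlining.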
2857
2858SmallVector<Value*,4> VarArgsToForward;
2859SmallVector<AttributeSet, 4> VarArgsAttrs;
2860for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2861 i < CB.arg_size(); i++) {
2862 VarArgsToForward.push_back(CB.getArgOperand(i));
2863 VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2864 }
2865
2866bool InlinedMustTailCalls =false, InlinedDeoptimizeCalls =false;
2867if (InlinedFunctionInfo.ContainsCalls) {
2868CallInst::TailCallKind CallSiteTailKind =CallInst::TCK_None;
2869if (CallInst *CI = dyn_cast<CallInst>(&CB))
2870 CallSiteTailKind = CI->getTailCallKind();
2871
2872// For inlining purposes, the "notail" marker is the same as no marker.
2873if (CallSiteTailKind ==CallInst::TCK_NoTail)
2874 CallSiteTailKind =CallInst::TCK_None;
2875
2876for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2877 ++BB) {
2878for (Instruction &I :llvm::make_early_inc_range(*BB)) {
2879CallInst *CI = dyn_cast<CallInst>(&I);
2880if (!CI)
2881continue;
2882
2883// Forward varargs from inlined call site to calls to the
2884// ForwardVarArgsTo function, if requested, and to musttail calls.
2885if (!VarArgsToForward.empty() &&
2886 ((ForwardVarArgsTo &&
2887 CI->getCalledFunction() == ForwardVarArgsTo) ||
2888 CI->isMustTailCall())) {
2889// Collect attributes for non-vararg parameters.
2890AttributeList Attrs = CI->getAttributes();
2891SmallVector<AttributeSet, 8> ArgAttrs;
2892if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2893for (unsigned ArgNo = 0;
2894 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2895 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2896 }
2897
2898// Add VarArg attributes.
2899 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2900 Attrs =AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2901 Attrs.getRetAttrs(), ArgAttrs);
2902// Add VarArgs to existing parameters.
2903SmallVector<Value *, 6> Params(CI->args());
2904 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2905CallInst *NewCI =CallInst::Create(
2906 CI->getFunctionType(), CI->getCalledOperand(), Params,"", CI->getIterator());
2907 NewCI->setDebugLoc(CI->getDebugLoc());
2908 NewCI->setAttributes(Attrs);
2909 NewCI->setCallingConv(CI->getCallingConv());
2910 CI->replaceAllUsesWith(NewCI);
2911 CI->eraseFromParent();
2912 CI = NewCI;
2913 }
2914
2915if (Function *F = CI->getCalledFunction())
2916 InlinedDeoptimizeCalls |=
2917F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2918
2919// We need to reduce the strength of any inlined tail calls. For
2920// musttail, we have to avoid introducing potential unbounded stack
2921// growth. For example, if functions 'f' and 'g' are mutually recursive
2922// with musttail, we can inline 'g' into 'f' so long as we preserve
2923// musttail on the cloned call to 'f'. If either the inlined call site
2924// or the cloned call site is *not* musttail, the program already has
2925// one frame of stack growth, so it's safe to remove musttail. Here is
2926// a table of example transformations:
2927//
2928// f -> musttail g -> musttail f ==> f -> musttail f
2929// f -> musttail g -> tail f ==> f -> tail f
2930// f -> g -> musttail f ==> f -> f
2931// f -> g -> tail f ==> f -> f
2932//
2933// Inlined notail calls should remain notail calls.
2934CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2935if (ChildTCK !=CallInst::TCK_NoTail)
2936 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2937 CI->setTailCallKind(ChildTCK);
2938 InlinedMustTailCalls |= CI->isMustTailCall();
2939
2940// Call sites inlined through a 'nounwind' call site should be
2941// 'nounwind' as well. However, avoid marking call sites explicitly
2942// where possible. This helps expose more opportunities for CSE after
2943// inlining, commonly when the callee is an intrinsic.
2944if (MarkNoUnwind && !CI->doesNotThrow())
2945 CI->setDoesNotThrow();
2946 }
2947 }
2948 }
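// A hedged sketch of the vararg forwarding above, with hypothetical names:
// if the inlined call site was 'call void (i32, ...) @f(i32 %x, i64 %v)',
// a musttail call inside the clone is rebuilt with the caller-supplied
// variadic tail appended,
//
//   musttail call void (i32, ...) @g(i32 %x, i64 %v)
//
// so the fixed parameters keep their attributes and the variadic operands
// pass through unchanged.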
2949
2950// Leave lifetime markers for the static allocas, scoping them to the
2951// function we just inlined.
2952// We need to insert lifetime intrinsics even at O0 to avoid invalid
2953// access caused by multithreaded coroutines. The check
2954// `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2955if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2956 !IFI.StaticAllocas.empty()) {
2957IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2958for (AllocaInst *AI : IFI.StaticAllocas) {
2959// Don't mark swifterror allocas. They can't have bitcast uses.
2960if (AI->isSwiftError())
2961continue;
2962
2963// If the alloca is already scoped to something smaller than the whole
2964// function then there's no need to add redundant, less accurate markers.
2965if (hasLifetimeMarkers(AI))
2966continue;
2967
2968// Try to determine the size of the allocation.
2969ConstantInt *AllocaSize =nullptr;
2970if (ConstantInt *AIArraySize =
2971 dyn_cast<ConstantInt>(AI->getArraySize())) {
2972auto &DL = Caller->getDataLayout();
2973Type *AllocaType = AI->getAllocatedType();
2974TypeSize AllocaTypeSize =DL.getTypeAllocSize(AllocaType);
2975uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2976
2977// Don't add markers for zero-sized allocas.
2978if (AllocaArraySize == 0)
2979continue;
2980
2981// Check that array size doesn't saturate uint64_t and doesn't
2982// overflow when it's multiplied by type size.
2983if (!AllocaTypeSize.isScalable() &&
2984 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2985 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2986 AllocaTypeSize.getFixedValue()) {
2987 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2988 AllocaArraySize * AllocaTypeSize);
2989 }
2990 }
2991
2992 builder.CreateLifetimeStart(AI, AllocaSize);
2993for (ReturnInst *RI : Returns) {
2994// Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2995// call and a return. The return kills all local allocas.
2996if (InlinedMustTailCalls &&
2997 RI->getParent()->getTerminatingMustTailCall())
2998continue;
2999if (InlinedDeoptimizeCalls &&
3000 RI->getParent()->getTerminatingDeoptimizeCall())
3001continue;
3002IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
3003 }
3004 }
3005 }
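// Illustrative sketch: for a 4-byte static alloca %a this emits
//
//   call void @llvm.lifetime.start.p0(i64 4, ptr %a)   ; at the inlined entry
//   ...
//   call void @llvm.lifetime.end.p0(i64 4, ptr %a)     ; before each return
//
// scoping the allocation to the inlined body rather than the whole caller.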
3006
3007// If the inlined code contained dynamic alloca instructions, wrap the inlined
3008// code with llvm.stacksave/llvm.stackrestore intrinsics.
3009if (InlinedFunctionInfo.ContainsDynamicAllocas) {
3010// Insert the llvm.stacksave.
3011CallInst *SavedPtr =IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
3012 .CreateStackSave("savedstack");
3013
3014// Insert a call to llvm.stackrestore before any return instructions in the
3015// inlined function.
3016for (ReturnInst *RI : Returns) {
3017// Don't insert llvm.stackrestore calls between a musttail or deoptimize
3018// call and a return. The return will restore the stack pointer.
3019if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
3020continue;
3021if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
3022continue;
3023IRBuilder<>(RI).CreateStackRestore(SavedPtr);
3024 }
3025 }
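// Illustrative sketch: a dynamic alloca in the inlinee ends up bracketed as
//
//   %sp = call ptr @llvm.stacksave()
//   %a  = alloca i8, i64 %n
//   ...
//   call void @llvm.stackrestore(ptr %sp)
//
// so repeated execution of the inlined body cannot grow the stack.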
3026
3027// If we are inlining for an invoke instruction, we must make sure to rewrite
3028// any call instructions into invoke instructions. This is sensitive to which
3029// funclet pads were top-level in the inlinee, so must be done before
3030// rewriting the "parent pad" links.
3031if (auto *II = dyn_cast<InvokeInst>(&CB)) {
3032BasicBlock *UnwindDest =II->getUnwindDest();
3033BasicBlock::iterator FirstNonPHI = UnwindDest->getFirstNonPHIIt();
3034if (isa<LandingPadInst>(FirstNonPHI)) {
3035HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3036 }else {
3037HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
3038 }
3039 }
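// Illustrative sketch: when the call site is an invoke, a throwing call
// cloned from the callee,
//
//   call void @may_throw()
//
// is rewritten to unwind to the invoke's unwind destination:
//
//   invoke void @may_throw()
//           to label %cont unwind label %lpad
//
// (@may_throw, %cont, and %lpad are hypothetical names.)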
3040
3041// Update the lexical scopes of the new funclets and callsites.
3042// Anything that had 'none' as its parent is now nested inside the callsite's
3043// EHPad.
3044if (CallSiteEHPad) {
3045for (Function::iterator BB = FirstNewBlock->getIterator(),
3046 E = Caller->end();
3047 BB != E; ++BB) {
3048// Add bundle operands to inlined call sites.
3049PropagateOperandBundles(BB, CallSiteEHPad);
3050
3051// It is problematic if the inlinee has a cleanupret which unwinds to
3052// caller and we inline it into a call site which doesn't unwind but into
3053// an EH pad that does. Such an edge must be dynamically unreachable.
3054// As such, we replace the cleanupret with unreachable.
3055if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
3056if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
3057changeToUnreachable(CleanupRet);
3058
3059BasicBlock::iteratorI = BB->getFirstNonPHIIt();
3060if (!I->isEHPad())
3061continue;
3062
3063if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
3064if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
3065 CatchSwitch->setParentPad(CallSiteEHPad);
3066 }else {
3067auto *FPI = cast<FuncletPadInst>(I);
3068if (isa<ConstantTokenNone>(FPI->getParentPad()))
3069 FPI->setParentPad(CallSiteEHPad);
3070 }
3071 }
3072 }
3073
3074if (InlinedDeoptimizeCalls) {
3075// We need to at least remove the deoptimizing returns from the Return set,
3076// so that the control flow from those returns does not get merged into the
3077// caller (but terminate it instead). If the caller's return type does not
3078// match the callee's return type, we also need to change the return type of
3079// the intrinsic.
3080if (Caller->getReturnType() == CB.getType()) {
3081llvm::erase_if(Returns, [](ReturnInst *RI) {
3082return RI->getParent()->getTerminatingDeoptimizeCall() !=nullptr;
3083 });
3084 }else {
3085SmallVector<ReturnInst *, 8> NormalReturns;
3086Function *NewDeoptIntrinsic =Intrinsic::getOrInsertDeclaration(
3087 Caller->getParent(), Intrinsic::experimental_deoptimize,
3088 {Caller->getReturnType()});
3089
3090for (ReturnInst *RI : Returns) {
3091CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
3092if (!DeoptCall) {
3093 NormalReturns.push_back(RI);
3094continue;
3095 }
3096
3097// The calling convention on the deoptimize call itself may be bogus,
3098// since the code we're inlining may have undefined behavior (and may
3099// never actually execute at runtime); but all
3100// @llvm.experimental.deoptimize declarations have to have the same
3101// calling convention in a well-formed module.
3102auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
3103 NewDeoptIntrinsic->setCallingConv(CallingConv);
3104auto *CurBB = RI->getParent();
3105 RI->eraseFromParent();
3106
3107SmallVector<Value *, 4> CallArgs(DeoptCall->args());
3108
3109SmallVector<OperandBundleDef, 1> OpBundles;
3110 DeoptCall->getOperandBundlesAsDefs(OpBundles);
3111auto DeoptAttributes = DeoptCall->getAttributes();
3112 DeoptCall->eraseFromParent();
3113assert(!OpBundles.empty() &&
3114"Expected at least the deopt operand bundle");
3115
3116IRBuilder<> Builder(CurBB);
3117CallInst *NewDeoptCall =
3118 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
3119 NewDeoptCall->setCallingConv(CallingConv);
3120 NewDeoptCall->setAttributes(DeoptAttributes);
3121if (NewDeoptCall->getType()->isVoidTy())
3122 Builder.CreateRetVoid();
3123else
3124 Builder.CreateRet(NewDeoptCall);
3125// Since the ret type is changed, remove the incompatible attributes.
3126 NewDeoptCall->removeRetAttrs(AttributeFuncs::typeIncompatible(
3127 NewDeoptCall->getType(), NewDeoptCall->getRetAttributes()));
3128 }
3129
3130// Leave behind the normal returns so we can merge control flow.
3131std::swap(Returns, NormalReturns);
3132 }
3133 }
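// A hedged sketch of the retyping above: if a void caller inlines a callee
// that deoptimized through @llvm.experimental.deoptimize.i32, the
// deoptimizing return is rebuilt against a declaration matching the
// caller's return type, roughly
//
//   call void @llvm.experimental.deoptimize.isVoid(...) [ "deopt"(...) ]
//   ret void
//
// so the deoptimizing path terminates the caller instead of merging into
// the post-call block.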
3134
3135// Handle any inlined musttail call sites. In order for a new call site to be
3136// musttail, the source of the clone and the inlined call site must have been
3137// musttail. Therefore it's safe to return without merging control into the
3138// phi below.
3139if (InlinedMustTailCalls) {
3140// Check if we need to bitcast the result of any musttail calls.
3141Type *NewRetTy = Caller->getReturnType();
3142bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
3143
3144// Handle the returns preceded by musttail calls separately.
3145SmallVector<ReturnInst *, 8> NormalReturns;
3146for (ReturnInst *RI : Returns) {
3147CallInst *ReturnedMustTail =
3148 RI->getParent()->getTerminatingMustTailCall();
3149if (!ReturnedMustTail) {
3150 NormalReturns.push_back(RI);
3151continue;
3152 }
3153if (!NeedBitCast)
3154continue;
3155
3156// Delete the old return and any preceding bitcast.
3157BasicBlock *CurBB = RI->getParent();
3158auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
3159 RI->eraseFromParent();
3160if (OldCast)
3161 OldCast->eraseFromParent();
3162
3163// Insert a new bitcast and return with the right type.
3164IRBuilder<> Builder(CurBB);
3165 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
3166 }
3167
3168// Leave behind the normal returns so we can merge control flow.
3169std::swap(Returns, NormalReturns);
3170 }
3171
3172// Now that all of the transforms on the inlined code have taken place but
3173// before we splice the inlined code into the CFG and lose track of which
3174// blocks were actually inlined, collect the call sites. We only do this if
3175// call graph updates weren't requested, as those provide value handle based
3176// tracking of inlined call sites instead. Calls to intrinsics are not
3177// collected because they are not inlineable.
3178if (InlinedFunctionInfo.ContainsCalls) {
3179// Otherwise just collect the raw call sites that were inlined.
3180for (BasicBlock &NewBB :
3181make_range(FirstNewBlock->getIterator(), Caller->end()))
3182for (Instruction &I : NewBB)
3183if (auto *CB = dyn_cast<CallBase>(&I))
3184if (!(CB->getCalledFunction() &&
3185 CB->getCalledFunction()->isIntrinsic()))
3186 IFI.InlinedCallSites.push_back(CB);
3187 }
3188
3189// If we cloned in _exactly one_ basic block, and if that block ends in a
3190// return instruction, we splice the body of the inlined callee directly into
3191// the calling basic block.
3192if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
3193// Move all of the instructions right before the call.
3194 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
3195 FirstNewBlock->end());
3196// Remove the cloned basic block.
3197 Caller->back().eraseFromParent();
3198
3199// If the call site was an invoke instruction, add a branch to the normal
3200// destination.
3201if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3202BranchInst *NewBr =BranchInst::Create(II->getNormalDest(), CB.getIterator());
3203 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
3204 }
3205
3206// If the return instruction returned a value, replace uses of the call with
3207// uses of the returned value.
3208if (!CB.use_empty()) {
3209ReturnInst *R = Returns[0];
3210if (&CB == R->getReturnValue())
3211 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
3212else
3213 CB.replaceAllUsesWith(R->getReturnValue());
3214 }
3215// Since we are now done with the Call/Invoke, we can delete it.
3216 CB.eraseFromParent();
3217
3218// Since we are now done with the return instruction, delete it also.
3219 Returns[0]->eraseFromParent();
3220
3221if (MergeAttributes)
3222AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3223
3224// We are now done with the inlining.
3225returnInlineResult::success();
3226 }
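// Illustrative sketch of the single-block fast path: for a hypothetical
//
//   define i32 @callee(i32 %x) { ret i32 %x }
//
// the cloned body is spliced directly in front of the call, uses of the
// call are replaced with %x, and both the call and the ret disappear
// without creating any new control-flow edges.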
3227
3228// Otherwise, we have the normal case, of more than one block to inline or
3229// multiple return sites.
3230
3231// We want to clone the entire callee function into the hole between the
3232// "starter" and "ender" blocks. How we accomplish this depends on whether
3233// this is an invoke instruction or a call instruction.
3234BasicBlock *AfterCallBB;
3235BranchInst *CreatedBranchToNormalDest =nullptr;
3236if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
3237
3238// Add an unconditional branch to make this look like the CallInst case...
3239 CreatedBranchToNormalDest =BranchInst::Create(II->getNormalDest(), CB.getIterator());
3240
3241// Split the basic block. This guarantees that no PHI nodes will have to be
3242// updated due to new incoming edges, and make the invoke case more
3243// symmetric to the call case.
3244 AfterCallBB =
3245 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
3246 CalledFunc->getName() +".exit");
3247
3248 }else {// It's a call
3249// If this is a call instruction, we need to split the basic block that
3250// the call lives in.
3251//
3252 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
3253 CalledFunc->getName() +".exit");
3254 }
3255
3256if (IFI.CallerBFI) {
3257// Copy original BB's block frequency to AfterCallBB
3258 IFI.CallerBFI->setBlockFreq(AfterCallBB,
3259 IFI.CallerBFI->getBlockFreq(OrigBB));
3260 }
3261
3262// Change the branch that used to go to AfterCallBB to branch to the first
3263// basic block of the inlined function.
3264//
3265Instruction *Br = OrigBB->getTerminator();
3266assert(Br && Br->getOpcode() == Instruction::Br &&
3267"splitBasicBlock broken!");
3268 Br->setOperand(0, &*FirstNewBlock);
3269
3270// Now that the function is correct, make it a little bit nicer. In
3271// particular, move the basic blocks inserted from the end of the function
3272// into the space made by splitting the source basic block.
3273 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
3274 Caller->end());
3275
3276// Handle all of the return instructions that we just cloned in, and eliminate
3277// any users of the original call/invoke instruction.
3278Type *RTy = CalledFunc->getReturnType();
3279
3280PHINode *PHI =nullptr;
3281if (Returns.size() > 1) {
3282// The PHI node should go at the front of the new basic block to merge all
3283// possible incoming values.
3284if (!CB.use_empty()) {
3285PHI =PHINode::Create(RTy, Returns.size(), CB.getName());
3286PHI->insertBefore(AfterCallBB->begin());
3287// Anything that used the result of the function call should now use the
3288// PHI node as their operand.
3289 CB.replaceAllUsesWith(PHI);
3290 }
3291
3292// Loop over all of the return instructions adding entries to the PHI node
3293// as appropriate.
3294if (PHI) {
3295for (ReturnInst *RI : Returns) {
3296assert(RI->getReturnValue()->getType() ==PHI->getType() &&
3297"Ret value not consistent in function!");
3298PHI->addIncoming(RI->getReturnValue(), RI->getParent());
3299 }
3300 }
3301
3302// Add a branch to the merge points and remove return instructions.
3303DebugLoc Loc;
3304for (ReturnInst *RI : Returns) {
3305BranchInst *BI =BranchInst::Create(AfterCallBB, RI->getIterator());
3306 Loc = RI->getDebugLoc();
3307 BI->setDebugLoc(Loc);
3308 RI->eraseFromParent();
3309 }
3310// We need to set the debug location to *somewhere* inside the
3311// inlined function. The line number may be nonsensical, but the
3312// instruction will at least be associated with the right
3313// function.
3314if (CreatedBranchToNormalDest)
3315 CreatedBranchToNormalDest->setDebugLoc(Loc);
3316 }elseif (!Returns.empty()) {
3317// Otherwise, if there is exactly one return value, just replace anything
3318// using the return value of the call with the computed value.
3319if (!CB.use_empty()) {
3320if (&CB == Returns[0]->getReturnValue())
3321 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
3322else
3323 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
3324 }
3325
3326// Update PHI nodes that use the ReturnBB to use the AfterCallBB.
3327BasicBlock *ReturnBB = Returns[0]->getParent();
3328 ReturnBB->replaceAllUsesWith(AfterCallBB);
3329
3330// Splice the code from the return block into the block that it will return
3331// to, which contains the code that was after the call.
3332 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
3333
3334if (CreatedBranchToNormalDest)
3335 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
3336
3337// Delete the return instruction and the now-empty ReturnBB.
3338 Returns[0]->eraseFromParent();
3339 ReturnBB->eraseFromParent();
3340 }elseif (!CB.use_empty()) {
3341// No returns, but something is using the return value of the call. Just
3342// nuke the result.
3343 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
3344 }
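// Illustrative sketch of the merge above: with two inlined returns, the
// '.exit' block ends up looking like
//
//   exit:
//     %r = phi i32 [ %a, %ret1 ], [ %b, %ret2 ]
//
// and every former use of the call now reads %r; with a single return, the
// returned value feeds the call's uses directly and no phi is created.
// (%r, %a, %b, %ret1, %ret2 are hypothetical names.)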
3345
3346// Since we are now done with the Call/Invoke, we can delete it.
3347 CB.eraseFromParent();
3348
3349// If we inlined any musttail calls and the original return is now
3350// unreachable, delete it. It can only contain a bitcast and ret.
3351if (InlinedMustTailCalls &&pred_empty(AfterCallBB))
3352 AfterCallBB->eraseFromParent();
3353
3354// We should always be able to fold the entry block of the function into the
3355// single predecessor of the block...
3356assert(cast<BranchInst>(Br)->isUnconditional() &&"splitBasicBlock broken!");
3357BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3358
3359// Splice the code entry block into calling block, right before the
3360// unconditional branch.
3361 CalleeEntry->replaceAllUsesWith(OrigBB);// Update PHI nodes
3362 OrigBB->splice(Br->getIterator(), CalleeEntry);
3363
3364// Remove the unconditional branch.
3365 Br->eraseFromParent();
3366
3367// Now we can remove the CalleeEntry block, which is now empty.
3368 CalleeEntry->eraseFromParent();
3369
3370// If we inserted a phi node, check to see if it has a single value (e.g. all
3371// the entries are the same or undef). If so, remove the PHI so it doesn't
3372// block other optimizations.
3373if (PHI) {
3374AssumptionCache *AC =
3375 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) :nullptr;
3376auto &DL = Caller->getDataLayout();
3377if (Value *V =simplifyInstruction(PHI, {DL,nullptr,nullptr, AC})) {
3378PHI->replaceAllUsesWith(V);
3379PHI->eraseFromParent();
3380 }
3381 }
3382
3383if (MergeAttributes)
3384AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3385
3386returnInlineResult::success();
3387}
PHI
Rewrite undef for PHI
Definition:AMDGPURewriteUndefForPHI.cpp:100
DL
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Definition:ARMSLSHardening.cpp:73
AliasAnalysis.h
AssumeBundleBuilder.h
AssumptionCache.h
AttributeMask.h
Attributes.h
This file contains the simple types necessary to represent the attributes associated with functions a...
UpdatePHINodes
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
Definition:BasicBlockUtils.cpp:1260
BlockFrequencyInfo.h
A
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
NoAliases
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
CallGraph.h
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
CaptureTracking.h
Casting.h
Cloning.h
CommandLine.h
ConstantRange.h
Constants.h
This file contains the declarations for the subclasses of Constant, which represent the different fla...
CtxProfAnalysis.h
DataLayout.h
DebugInfoMetadata.h
DebugLoc.h
LLVM_DEBUG
#define LLVM_DEBUG(...)
Definition:Debug.h:106
DenseMap.h
This file defines the DenseMap class.
DerivedTypes.h
Dominators.h
EHPersonalities.h
Name
std::string Name
Definition:ELFObjHandler.cpp:77
Size
uint64_t Size
Definition:ELFObjHandler.cpp:81
End
bool End
Definition:ELF_riscv.cpp:480
GlobalVariable.h
IRBuilder.h
Argument.h
BasicBlock.h
CFG.h
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Constant.h
Function.h
Instruction.h
IntrinsicInst.h
Module.h
Module.h This file contains the declarations for the Module class.
Type.h
User.h
Value.h
IndirectCallVisitor.h
InlineAsm.h
IdentifyValidUBGeneratingAttributes
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
Definition:InlineFunction.cpp:1515
collectEscapedLocals
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
Definition:InlineFunction.cpp:1933
fixupLineNumbers
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to to encode location where these instructions are inlined.
Definition:InlineFunction.cpp:1824
removeCallsiteMetadata
static void removeCallsiteMetadata(CallBase *Call)
Definition:InlineFunction.cpp:825
propagateMemProfHelper
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
Definition:InlineFunction.cpp:848
getUnwindDestToken
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
Definition:InlineFunction.cpp:417
PreserveAlignmentAssumptions
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
HandleInlinedLandingPad
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition:InlineFunction.cpp:620
getUnwindDestTokenHelper
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
Definition:InlineFunction.cpp:265
HandleCallsInBlockInlinedThroughInvoke
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
Definition:InlineFunction.cpp:561
inlineDebugLoc
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
Definition:InlineFunction.cpp:1814
UseNoAliasIntrinsic
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
PropagateCallSiteMetadata
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
Definition:InlineFunction.cpp:934
IdentifyValidPoisonGeneratingAttributes
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
Definition:InlineFunction.cpp:1530
propagateMemProfMetadata
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
Definition:InlineFunction.cpp:902
updateCallProfile
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
Definition:InlineFunction.cpp:2041
updateCallerBFI
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
Definition:InlineFunction.cpp:2013
AddReturnAttributes
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
Definition:InlineFunction.cpp:1541
MayContainThrowingOrExitingCallAfterCB
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
Definition:InlineFunction.cpp:1356
HandleByValArgumentInit
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
Definition:InlineFunction.cpp:1701
EnableNoAliasConversion
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
AddAliasScopeMetadata
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
Definition:InlineFunction.cpp:1113
remapIndices
static const std::pair< std::vector< int64_t >, std::vector< int64_t > > remapIndices(Function &Caller, BasicBlock *StartBB, PGOContextualProfile &CtxProf, uint32_t CalleeCounters, uint32_t CalleeCallsites)
Definition:InlineFunction.cpp:2226
getConvergenceEntry
static IntrinsicInst * getConvergenceEntry(BasicBlock &BB)
Definition:InlineFunction.cpp:186
HandleInlinedEHPad
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
Definition:InlineFunction.cpp:677
inlineRetainOrClaimRVCalls
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
Definition:InlineFunction.cpp:2132
getParentPad
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
Definition:InlineFunction.cpp:255
fixupAssignments
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
Definition:InlineFunction.cpp:1995
allocaWouldBeStaticInEntry
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
Definition:InlineFunction.cpp:1808
isUsedByLifetimeMarker
static bool isUsedByLifetimeMarker(Value *V)
Definition:InlineFunction.cpp:1778
removeMemProfMetadata
static void removeMemProfMetadata(CallBase *Call)
Definition:InlineFunction.cpp:821
HandleByValArgument
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
Definition:InlineFunction.cpp:1726
AddAlignmentAssumptions
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
Definition:InlineFunction.cpp:1664
trackInlinedStores
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
Definition:InlineFunction.cpp:1983
InlinerAttributeWindow
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
AddParamAndFnBasicAttributes
static void AddParamAndFnBasicAttributes(const CallBase &CB, ValueToValueMapTy &VMap, ClonedCodeInfo &InlinedFunctionInfo)
Definition:InlineFunction.cpp:1369
haveCommonPrefix
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
Definition:InlineFunction.cpp:800
PropagateOperandBundles
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
Definition:InlineFunction.cpp:975
hasLifetimeMarkers
static bool hasLifetimeMarkers(AllocaInst *AI)
Definition:InlineFunction.cpp:1788
updateMemprofMetadata
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
Definition:InlineFunction.cpp:829
InstrTypes.h
InstructionSimplify.h
Instructions.h
Intrinsics.h
LLVMContext.h
LoopDeletionResult::Deleted
@ Deleted
F
#define F(x, y, z)
Definition:MD5.cpp:55
I
#define I(x, y, z)
Definition:MD5.cpp:58
MDBuilder.h
Profile
Load MIR Sample Profile
Definition:MIRSampleProfile.cpp:78
getDebugLoc
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Definition:MachineInstrBundle.cpp:109
MemoryProfileInfo.h
Metadata.h
This file contains the declarations for metadata subclasses.
Range
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
II
uint64_t IntrinsicInst * II
Definition:NVVMIntrRange.cpp:51
ObjCARCAnalysisUtils.h
This file defines common analysis utilities used by the ObjC ARC Optimizer.
ObjCARCUtil.h
This file defines ARC utility functions which are used by various parts of the compiler.
PatternMatch.h
ProfDataUtils.h
This file contains the declarations for profiling metadata utility functions.
ProfileSummaryInfo.h
assert
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
STLExtras.h
This file contains some templates that are useful if you are working with the STL at all.
SetVector.h
This file implements a set that has insertion order iteration characteristics.
SmallPtrSet.h
This file defines the SmallPtrSet class.
SmallVector.h
This file defines the SmallVector class.
StringExtras.h
This file contains some functions that are useful when dealing with strings.
Local.h
ValueMapper.h
ValueTracking.h
VectorUtils.h
llvm::AAResults
Definition:AliasAnalysis.h:314
llvm::AAResults::getMemoryEffects
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Definition:AliasAnalysis.cpp:387
llvm::APInt
Class for arbitrary precision integers.
Definition:APInt.h:78
llvm::AllocaInst
an instruction to allocate memory on the stack
Definition:Instructions.h:63
llvm::AllocaInst::isSwiftError
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition:Instructions.h:149
llvm::AllocaInst::getType
PointerType * getType() const
Overload to return most specific pointer type.
Definition:Instructions.h:99
llvm::AllocaInst::getAllocatedType
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition:Instructions.h:117
llvm::AllocaInst::isUsedWithInAlloca
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition:Instructions.h:139
llvm::AllocaInst::getArraySize
const Value * getArraySize() const
Get the number of elements allocated.
Definition:Instructions.h:95
llvm::Argument
This class represents an incoming formal argument to a Function.
Definition:Argument.h:31
llvm::Argument::getArgNo
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
Definition:Argument.h:49
llvm::AssignGUIDPass::getGUID
static uint64_t getGUID(const Function &F)
Definition:CtxProfAnalysis.cpp:57
llvm::AssumptionCache
A cache of @llvm.assume calls within a function.
Definition:AssumptionCache.h:42
llvm::AssumptionCache::registerAssumption
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
Definition:AssumptionCache.cpp:185
llvm::AtomicCmpXchgInst
An instruction that atomically checks whether a specified value is in a memory location,...
Definition:Instructions.h:501
llvm::AtomicRMWInst
an instruction that atomically reads a memory location, combines it with another value,...
Definition:Instructions.h:704
llvm::AttrBuilder
Definition:Attributes.h:1064
llvm::AttrBuilder::addAlignmentAttr
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
Definition:Attributes.cpp:2155
llvm::AttrBuilder::getAttribute
Attribute getAttribute(Attribute::AttrKind Kind) const
Return Attribute with the given Kind.
Definition:Attributes.cpp:2304
llvm::AttrBuilder::getDereferenceableBytes
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
Definition:Attributes.h:1153
llvm::AttrBuilder::hasAttributes
bool hasAttributes() const
Return true if the builder has IR-level attributes.
Definition:Attributes.h:1123
llvm::AttrBuilder::addAttribute
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
Definition:Attributes.cpp:2108
llvm::AttrBuilder::getAlignment
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
Definition:Attributes.h:1142
llvm::AttrBuilder::addDereferenceableAttr
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
Definition:Attributes.cpp:2172
llvm::AttrBuilder::getDereferenceableOrNullBytes
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
Definition:Attributes.h:1159
llvm::AttrBuilder::removeAttribute
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
Definition:Attributes.cpp:2118
llvm::AttrBuilder::addDereferenceableOrNullAttr
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
Definition:Attributes.cpp:2178
llvm::AttrBuilder::addRangeAttr
AttrBuilder & addRangeAttr(const ConstantRange &CR)
Add range attribute.
Definition:Attributes.cpp:2273
llvm::AttributeList
Definition:Attributes.h:490
llvm::AttributeList::addRetAttributes
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
Definition:Attributes.h:620
llvm::AttributeList::get
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
Definition:Attributes.cpp:1499
llvm::AttributeList::getParamAttrs
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
Definition:Attributes.cpp:1852
llvm::AttributeSet::removeAttribute
AttributeSet removeAttribute(LLVMContext &C, Attribute::AttrKind Kind) const
Remove the specified attribute from this set.
Definition:Attributes.cpp:946
llvm::AttributeSet::get
static AttributeSet get(LLVMContext &C, const AttrBuilder &B)
Definition:Attributes.cpp:910
llvm::Attribute
Definition:Attributes.h:67
llvm::Attribute::getRange
const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition:Attributes.cpp:502
llvm::Attribute::AttrKind
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition:Attributes.h:86
llvm::Attribute::isValid
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition:Attributes.h:208
llvm::BasicBlock
LLVM Basic Block Representation.
Definition:BasicBlock.h:61
llvm::BasicBlock::end
iterator end()
Definition:BasicBlock.h:474
llvm::BasicBlock::begin
iterator begin()
Instruction iterator methods.
Definition:BasicBlock.h:461
llvm::BasicBlock::phis
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition:BasicBlock.h:530
llvm::BasicBlock::getFirstNonPHIIt
InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
Definition:BasicBlock.cpp:381
llvm::BasicBlock::splitBasicBlock
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
Definition:BasicBlock.cpp:599
llvm::BasicBlock::getParent
const Function * getParent() const
Return the enclosing method, or null if none.
Definition:BasicBlock.h:220
llvm::BasicBlock::eraseFromParent
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
Definition:BasicBlock.cpp:279
llvm::BasicBlock::iterator
InstListType::iterator iterator
Instruction iterators...
Definition:BasicBlock.h:177
llvm::BasicBlock::getTerminator
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition:BasicBlock.h:240
llvm::BasicBlock::splice
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
Definition:BasicBlock.h:644
llvm::BasicBlock::removePredecessor
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
Definition:BasicBlock.cpp:538
llvm::BlockFrequencyInfo
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Definition:BlockFrequencyInfo.h:37
llvm::BlockFrequencyInfo::setBlockFreq
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
Definition:BlockFrequencyInfo.cpp:225
llvm::BlockFrequencyInfo::setBlockFreqAndScale
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
Definition:BlockFrequencyInfo.cpp:231
llvm::BlockFrequencyInfo::getBlockFreq
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getblockFreq - Return block frequency.
Definition:BlockFrequencyInfo.cpp:200
llvm::BlockFrequency
Definition:BlockFrequency.h:26
llvm::BranchInst
Conditional or Unconditional Branch instruction.
Definition:Instructions.h:3016
llvm::BranchInst::Create
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Definition:Instructions.h:3072
llvm::CallBase
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition:InstrTypes.h:1112
llvm::CallBase::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition:InstrTypes.h:1403
llvm::CallBase::setDoesNotThrow
void setDoesNotThrow()
Definition:InstrTypes.h:1926
llvm::CallBase::getRetAlign
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
Definition:InstrTypes.h:1739
llvm::CallBase::getOperandBundlesAsDefs
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
Definition:Instructions.cpp:483
llvm::CallBase::getOperandBundleAt
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition:InstrTypes.h:2022
llvm::CallBase::getOperandBundle
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition:InstrTypes.h:2053
llvm::CallBase::getCalledFunction
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition:InstrTypes.h:1341
llvm::CallBase::removeRetAttrs
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
Definition:InstrTypes.h:1536
llvm::CallBase::hasRetAttr
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition:InstrTypes.h:1573
llvm::CallBase::getNumOperandBundles
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition:InstrTypes.h:1966
llvm::CallBase::getCallingConv
CallingConv::ID getCallingConv() const
Definition:InstrTypes.h:1399
llvm::CallBase::paramHasAttr
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
Definition:Instructions.cpp:409
llvm::CallBase::arg_begin
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition:InstrTypes.h:1261
llvm::CallBase::getParamAttr
Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Get the attribute of a given kind from a given arg.
Definition:InstrTypes.h:1621
llvm::CallBase::isByValArgument
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
Definition:InstrTypes.h:1679
llvm::CallBase::addOperandBundle
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
Definition:Instructions.cpp:562
llvm::CallBase::getRetAttributes
AttributeSet getRetAttributes() const
Return the return attributes for this call.
Definition:InstrTypes.h:1423
llvm::CallBase::getParamByValType
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Definition:InstrTypes.h:1766
llvm::CallBase::getCalledOperand
Value * getCalledOperand() const
Definition:InstrTypes.h:1334
llvm::CallBase::setAttributes
void setAttributes(AttributeList A)
Set the attributes for this call.
Definition:InstrTypes.h:1420
llvm::CallBase::getRange
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
Definition:Instructions.cpp:378
llvm::CallBase::doesNotThrow
bool doesNotThrow() const
Determine if the call cannot unwind.
Definition:InstrTypes.h:1925
llvm::CallBase::getArgOperand
Value * getArgOperand(unsigned i) const
Definition:InstrTypes.h:1286
llvm::CallBase::getRetDereferenceableBytes
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition:InstrTypes.h:1810
llvm::CallBase::isConvergent
bool isConvergent() const
Determine if the invoke is convergent.
Definition:InstrTypes.h:1937
llvm::CallBase::getFunctionType
FunctionType * getFunctionType() const
Definition:InstrTypes.h:1199
llvm::CallBase::Create
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
Definition:Instructions.cpp:301
llvm::CallBase::getRetDereferenceableOrNullBytes
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
Definition:InstrTypes.h:1825
llvm::CallBase::args
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
Definition:InstrTypes.h:1277
llvm::CallBase::arg_size
unsigned arg_size() const
Definition:InstrTypes.h:1284
llvm::CallBase::getAttributes
AttributeList getAttributes() const
Return the attributes for this call.
Definition:InstrTypes.h:1417
llvm::CallBase::hasOperandBundles
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition:InstrTypes.h:1971
llvm::CallBase::getCaller
Function * getCaller()
Helper to get the caller (the parent function).
Definition:Instructions.cpp:327
llvm::CallInst
This class represents a function call, abstracting a target machine's calling convention.
Definition:Instructions.h:1479
llvm::CallInst::setTailCallKind
void setTailCallKind(TailCallKind TCK)
Definition:Instructions.h:1598
llvm::CallInst::getTailCallKind
TailCallKind getTailCallKind() const
Definition:Instructions.h:1585
llvm::CallInst::Create
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition:Instructions.h:1514
llvm::CallInst::isMustTailCall
bool isMustTailCall() const
Definition:Instructions.h:1594
llvm::CallInst::TailCallKind
TailCallKind
Definition:Instructions.h:1572
llvm::CallInst::TCK_None
@ TCK_None
Definition:Instructions.h:1573
llvm::CallInst::TCK_NoTail
@ TCK_NoTail
Definition:Instructions.h:1576
llvm::CatchSwitchInst::Create
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Definition:Instructions.h:4093
llvm::CleanupReturnInst::Create
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB=nullptr, InsertPosition InsertBefore=nullptr)
Definition:Instructions.h:4381
llvm::ConstantInt
This is the shared class of boolean and integer constants.
Definition:Constants.h:83
llvm::ConstantRange
This class represents a range of values.
Definition:ConstantRange.h:47
llvm::ConstantRange::intersectWith
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
Definition:ConstantRange.cpp:581
llvm::ConstantTokenNone::get
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
Definition:Constants.cpp:1522
llvm::Constant
This is an important base class in LLVM.
Definition:Constant.h:42
llvm::Constant::stripPointerCasts
const Constant * stripPointerCasts() const
Definition:Constant.h:218
llvm::CtxProfAnalysis::getBBInstrumentation
static InstrProfIncrementInst * getBBInstrumentation(BasicBlock &BB)
Get the instruction instrumenting a BB, or nullptr if not present.
Definition:CtxProfAnalysis.cpp:211
llvm::CtxProfAnalysis::getCallsiteInstrumentation
static InstrProfCallsite * getCallsiteInstrumentation(CallBase &CB)
Get the instruction instrumenting a callsite, or nullptr if that cannot be found.
Definition:CtxProfAnalysis.cpp:198
llvm::DILocation
Debug location.
Definition:DebugInfoMetadata.h:1988
llvm::DISubprogram
Subprogram description.
Definition:DebugInfoMetadata.h:1710
llvm::DWARFExpression::Operation
This class represents an Operation in the Expression.
Definition:DWARFExpression.h:32
llvm::DataLayout
A parsed version of the target data layout string in and methods for querying it.
Definition:DataLayout.h:63
llvm::DbgRecord
Base class for non-instruction debug metadata records that have positions within IR.
Definition:DebugProgramInstruction.h:134
llvm::DebugLoc
A debug info location.
Definition:DebugLoc.h:33
llvm::DebugLoc::getLine
unsigned getLine() const
Definition:DebugLoc.cpp:24
llvm::DebugLoc::get
DILocation * get() const
Get the underlying DILocation.
Definition:DebugLoc.cpp:20
llvm::DebugLoc::getScope
MDNode * getScope() const
Definition:DebugLoc.cpp:34
llvm::DebugLoc::appendInlinedAt
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
Definition:DebugLoc.cpp:110
llvm::DebugLoc::getCol
unsigned getCol() const
Definition:DebugLoc.cpp:29
llvm::DenseMapBase::find
iterator find(const_arg_type_t< KeyT > Val)
Definition:DenseMap.h:156
llvm::DenseMapBase::count
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition:DenseMap.h:152
llvm::DenseMapBase::end
iterator end()
Definition:DenseMap.h:84
llvm::DenseMapBase::insert
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition:DenseMap.h:211
llvm::DenseMap
Definition:DenseMap.h:727
llvm::DenseSet
Implements a dense probed hash-table based set.
Definition:DenseSet.h:278
llvm::DominatorTreeBase::recalculate
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Definition:GenericDomTree.h:859
llvm::DominatorTree
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition:Dominators.h:162
llvm::FunctionType::getNumParams
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition:DerivedTypes.h:144
llvm::Function::ProfileCount
Class to represent profile counts.
Definition:Function.h:292
llvm::Function::ProfileCount::getCount
uint64_t getCount() const
Definition:Function.h:300
llvm::Function::ProfileCount::isSynthetic
bool isSynthetic() const
Definition:Function.h:302
llvm::Function
Definition:Function.h:63
llvm::Function::getEntryBlock
const BasicBlock & getEntryBlock() const
Definition:Function.h:809
llvm::Function::iterator
BasicBlockListType::iterator iterator
Definition:Function.h:68
llvm::Function::getFunctionType
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition:Function.h:216
llvm::Function::front
const BasicBlock & front() const
Definition:Function.h:860
llvm::Function::args
iterator_range< arg_iterator > args()
Definition:Function.h:892
llvm::Function::getSubprogram
DISubprogram * getSubprogram() const
Get the attached subprogram.
Definition:Metadata.cpp:1874
llvm::Function::hasGC
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
Definition:Function.h:345
llvm::Function::getCallingConv
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition:Function.h:277
llvm::Function::hasPersonalityFn
bool hasPersonalityFn() const
Check whether this function has a personality function.
Definition:Function.h:905
llvm::Function::getPersonalityFn
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition:Function.cpp:1048
llvm::Function::arg_end
arg_iterator arg_end()
Definition:Function.h:877
llvm::Function::arg_begin
arg_iterator arg_begin()
Definition:Function.h:868
llvm::Function::isIntrinsic
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
Definition:Function.h:256
llvm::Function::getParamAlign
MaybeAlign getParamAlign(unsigned ArgNo) const
Definition:Function.h:488
llvm::Function::getContext
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition:Function.cpp:369
llvm::Function::getGC
const std::string & getGC() const
Definition:Function.cpp:835
llvm::Function::getEntryCount
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Definition:Function.cpp:1133
llvm::Function::getReturnType
Type * getReturnType() const
Returns the type of the ret val.
Definition:Function.h:221
llvm::Function::end
iterator end()
Definition:Function.h:855
llvm::Function::setCallingConv
void setCallingConv(CallingConv::ID CC)
Definition:Function.h:281
llvm::Function::onlyReadsMemory
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
Definition:Function.cpp:892
llvm::Function::hasFnAttribute
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition:Function.cpp:731
llvm::GlobalValue::isDeclaration
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Definition:Globals.cpp:296
llvm::IRBuilderBase::CreateStackSave
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
Definition:IRBuilder.h:1088
llvm::IRBuilderBase::CreateLifetimeStart
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
Definition:IRBuilder.cpp:460
llvm::IRBuilderBase::CreateAlignmentAssumption
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
Definition:IRBuilder.cpp:1265
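CreateAlignmentAssumption is how an align(N) attribute on a callee parameter survives inlining when the alignment cannot be proven at the call site. A hedged sketch, where ArgVal, ArgNo, DL, and AC are illustrative stand-ins for the inliner's own state:

// Only materialize an llvm.assume if the alignment is not already known.
IRBuilder<> Builder(&CB);
Align ParamAlign = CalledFunc->getParamAlign(ArgNo).valueOrOne();
if (getKnownAlignment(ArgVal, DL, &CB, AC) < ParamAlign.value())
  Builder.CreateAlignmentAssumption(DL, ArgVal, ParamAlign.value());
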
llvm::IRBuilderBase::CreateRet
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
Definition:IRBuilder.h:1139
llvm::IRBuilderBase::getInt64
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition:IRBuilder.h:510
llvm::IRBuilderBase::CreateIntrinsic
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Definition:IRBuilder.cpp:900
llvm::IRBuilderBase::CreateBitCast
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition:IRBuilder.h:2152
llvm::IRBuilderBase::CreateRetVoid
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
Definition:IRBuilder.h:1134
llvm::IRBuilderBase::CreateCall
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition:IRBuilder.h:2449
llvm::IRBuilderBase::CreateLifetimeEnd
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
Definition:IRBuilder.cpp:472
llvm::IRBuilderBase::CreateStackRestore
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
Definition:IRBuilder.h:1095
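The stacksave/stackrestore pair above is what brackets an inlined body containing dynamic allocas, so the callee's stack allocations are reclaimed on every exit. A minimal sketch, with FirstNewBlock and Returns standing in for the inliner's bookkeeping:

// Save the stack pointer on entry to the inlined code...
IRBuilder<> Builder(&*FirstNewBlock, FirstNewBlock->begin());
Value *SavedPtr = Builder.CreateStackSave("savedstack");
// ...and restore it immediately before every inlined return.
for (ReturnInst *RI : Returns)
  IRBuilder<>(RI).CreateStackRestore(SavedPtr);
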
llvm::IRBuilderBase::SetInsertPoint
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition:IRBuilder.h:199
llvm::IRBuilderBase::CreateMemCpy
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Definition:IRBuilder.h:677
llvm::IRBuilderBase::CreateNoAliasScopeDeclaration
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
Definition:IRBuilder.cpp:532
llvm::IRBuilder
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition:IRBuilder.h:2705
llvm::Init
Definition:Record.h:285
llvm::InlineFunctionInfo
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
Definition:Cloning.h:268
llvm::InlineFunctionInfo::PSI
ProfileSummaryInfo * PSI
Definition:Cloning.h:281
llvm::InlineFunctionInfo::UpdateProfile
bool UpdateProfile
Update profile for callee as well as cloned version.
Definition:Cloning.h:301
llvm::InlineFunctionInfo::GetAssumptionCache
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
Definition:Cloning.h:280
llvm::InlineFunctionInfo::CalleeBFI
BlockFrequencyInfo * CalleeBFI
Definition:Cloning.h:282
llvm::InlineFunctionInfo::StaticAllocas
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
Definition:Cloning.h:286
llvm::InlineFunctionInfo::reset
void reset()
Definition:Cloning.h:303
llvm::InlineFunctionInfo::CallerBFI
BlockFrequencyInfo * CallerBFI
Definition:Cloning.h:282
llvm::InlineFunctionInfo::InlinedCallSites
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
Definition:Cloning.h:297
llvm::InlineResult
InlineResult is basically true or false.
Definition:InlineCost.h:179
llvm::InlineResult::success
static InlineResult success()
Definition:InlineCost.h:184
llvm::InlineResult::failure
static InlineResult failure(const char *Reason)
Definition:InlineCost.h:185
llvm::InstrProfCallsite
This represents the llvm.instrprof.callsite intrinsic.
Definition:IntrinsicInst.h:1617
llvm::InstrProfIncrementInst
This represents the llvm.instrprof.increment intrinsic.
Definition:IntrinsicInst.h:1590
llvm::Instruction
Definition:Instruction.h:68
llvm::Instruction::insertBefore
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
Definition:Instruction.cpp:99
llvm::Instruction::getDebugLoc
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition:Instruction.h:511
llvm::Instruction::hasMetadata
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
Definition:Instruction.h:404
llvm::Instruction::eraseFromParent
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition:Instruction.cpp:94
llvm::Instruction::getFunction
const Function * getFunction() const
Return the function this instruction belongs to.
Definition:Instruction.cpp:72
llvm::Instruction::getMetadata
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition:Instruction.h:426
llvm::Instruction::setMetadata
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition:Metadata.cpp:1679
llvm::Instruction::getOpcode
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition:Instruction.h:310
llvm::Instruction::setDebugLoc
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition:Instruction.h:508
llvm::Instruction::getDataLayout
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition:Instruction.cpp:76
llvm::IntrinsicInst
A wrapper class for inspecting calls to intrinsic functions.
Definition:IntrinsicInst.h:48
llvm::IntrinsicInst::mayLowerToFunctionCall
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
Definition:IntrinsicInst.cpp:36
llvm::InvokeInst
Invoke instruction.
Definition:Instructions.h:3670
llvm::LLVMContext
This is an important class for using LLVM in a threaded context.
Definition:LLVMContext.h:67
llvm::LLVMContext::OB_deopt
@ OB_deopt
Definition:LLVMContext.h:89
llvm::LLVMContext::OB_clang_arc_attachedcall
@ OB_clang_arc_attachedcall
Definition:LLVMContext.h:95
llvm::LLVMContext::OB_funclet
@ OB_funclet
Definition:LLVMContext.h:90
llvm::LLVMContext::OB_convergencectrl
@ OB_convergencectrl
Definition:LLVMContext.h:98
llvm::LLVMContext::OB_kcfi
@ OB_kcfi
Definition:LLVMContext.h:97
llvm::LandingPadInst
The landingpad instruction holds all of the information necessary to generate correct exception handl...
Definition:Instructions.h:2840
llvm::LandingPadInst::isCleanup
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
Definition:Instructions.h:2885
llvm::LandingPadInst::getNumClauses
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Definition:Instructions.h:2910
llvm::LandingPadInst::getClause
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
Definition:Instructions.h:2895
llvm::LoadInst
An instruction for reading from memory.
Definition:Instructions.h:176
llvm::MDBuilder
Definition:MDBuilder.h:36
llvm::MDBuilder::createAnonymousAliasScope
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
Definition:MDBuilder.h:174
llvm::MDBuilder::createAnonymousAliasScopeDomain
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
Definition:MDBuilder.h:167
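These two MDBuilder entries are how noalias parameter attributes become scoped-alias metadata at inline time: one fresh domain per inlined call, one scope per noalias argument. A hedged sketch (NewScopes is an illustrative name):

MDBuilder MDB(CalledFunc->getContext());
// One domain for this inlined call, one scope per noalias argument in it.
MDNode *NewDomain = MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
DenseMap<const Argument *, MDNode *> NewScopes;
for (const Argument &Arg : CalledFunc->args())
  if (Arg.hasNoAliasAttr() && !Arg.use_empty())
    NewScopes[&Arg] = MDB.createAnonymousAliasScope(NewDomain, Arg.getName());
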
llvm::MDNode
Metadata node.
Definition:Metadata.h:1073
llvm::MDNode::replaceAllUsesWith
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
Definition:Metadata.h:1270
llvm::MDNode::concatenate
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
Definition:Metadata.cpp:1114
llvm::MDNode::isTemporary
bool isTemporary() const
Definition:Metadata.h:1257
llvm::MDNode::operands
ArrayRef< MDOperand > operands() const
Definition:Metadata.h:1432
llvm::MDNode::op_end
op_iterator op_end() const
Definition:Metadata.h:1428
llvm::MDNode::get
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition:Metadata.h:1549
llvm::MDNode::getNumOperands
unsigned getNumOperands() const
Return number of MDNode operands.
Definition:Metadata.h:1440
llvm::MDNode::op_begin
op_iterator op_begin() const
Definition:Metadata.h:1424
llvm::MDNode::getContext
LLVMContext & getContext() const
Definition:Metadata.h:1237
llvm::MDTuple
Tuple of metadata.
Definition:Metadata.h:1479
llvm::MDTuple::getTemporary
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
Definition:Metadata.h:1526
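getTemporary plus MDNode::replaceAllUsesWith (entry above) form the standard two-phase pattern for building metadata graphs that may contain forward references or cycles: a temporary stands in for a node that does not exist yet, then is RAUW'd away. A minimal sketch:

// Phase 1: a placeholder node that other metadata can reference.
TempMDTuple Placeholder = MDTuple::getTemporary(Ctx, {});
// Phase 2: once all operands are known, build the final uniqued node and
// redirect every user of the placeholder to it.
Metadata *Ops[] = {MDString::get(Ctx, "scope")};
MDNode *Final = MDNode::get(Ctx, Ops);
Placeholder->replaceAllUsesWith(Final);
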
llvm::MemoryEffectsBase
Definition:ModRef.h:72
llvm::MemoryEffectsBase::onlyAccessesInaccessibleMem
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition:ModRef.h:211
llvm::MemoryEffectsBase::onlyAccessesArgPointees
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition:ModRef.h:201
llvm::Metadata
Root of the metadata hierarchy.
Definition:Metadata.h:62
llvm::Module
A Module instance is used to store all the information related to an LLVM module.
Definition:Module.h:65
llvm::OperandBundleDefT
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition:InstrTypes.h:1065
llvm::PGOContextualProfile
The instrumented contextual profile, produced by the CtxProfAnalysis.
Definition:CtxProfAnalysis.h:31
llvm::PGOContextualProfile::update
void update(Visitor, const Function &F)
Definition:CtxProfAnalysis.cpp:262
llvm::PGOContextualProfile::getNumCounters
uint32_t getNumCounters(const Function &F) const
Definition:CtxProfAnalysis.h:76
llvm::PGOContextualProfile::allocateNextCounterIndex
uint32_t allocateNextCounterIndex(const Function &F)
Definition:CtxProfAnalysis.h:86
llvm::PGOContextualProfile::getNumCallsites
uint32_t getNumCallsites(const Function &F) const
Definition:CtxProfAnalysis.h:81
llvm::PGOContextualProfile::allocateNextCallsiteIndex
uint32_t allocateNextCallsiteIndex(const Function &F)
Definition:CtxProfAnalysis.h:91
llvm::PGOCtxProfContext
A node (context) in the loaded contextual profile, suitable for mutation during IPO passes.
Definition:PGOCtxProfReader.h:84
llvm::PHINode
Definition:Instructions.h:2600
llvm::PHINode::addIncoming
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Definition:Instructions.h:2735
llvm::PHINode::Create
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Definition:Instructions.h:2635
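Create and addIncoming (entries above) are how a callee with several 'ret' instructions is merged back into straight-line code: one PHI in the block after the call, one incoming value per return. A sketch with illustrative AfterCallBB and Returns names:

// Merge all inlined return values into a single PHI at the join block.
PHINode *PHI = PHINode::Create(CalledFunc->getReturnType(), Returns.size(),
                               CB.getName(), AfterCallBB->begin());
CB.replaceAllUsesWith(PHI);
for (ReturnInst *RI : Returns)
  PHI->addIncoming(RI->getReturnValue(), RI->getParent());
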
llvm::PoisonValue::get
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
Definition:Constants.cpp:1878
llvm::ProfileSummaryInfo
Analysis providing profile information.
Definition:ProfileSummaryInfo.h:41
llvm::ProfileSummaryInfo::getProfileCount
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Definition:ProfileSummaryInfo.cpp:69
llvm::ResumeInst
Resume the propagation of an exception.
Definition:Instructions.h:4002
llvm::ReturnInst
Return a value (possibly void), from a function.
Definition:Instructions.h:2938
llvm::SetVector
A vector that has set insertion semantics.
Definition:SetVector.h:57
llvm::SmallPtrSetImpl
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition:SmallPtrSet.h:363
llvm::SmallPtrSetImpl::count
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition:SmallPtrSet.h:452
llvm::SmallPtrSetImpl::insert
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition:SmallPtrSet.h:384
llvm::SmallPtrSetImpl::contains
bool contains(ConstPtrType Ptr) const
Definition:SmallPtrSet.h:458
llvm::SmallPtrSet
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition:SmallPtrSet.h:519
llvm::SmallVectorBase::empty
bool empty() const
Definition:SmallVector.h:81
llvm::SmallVectorBase::size
size_t size() const
Definition:SmallVector.h:78
llvm::SmallVectorImpl
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition:SmallVector.h:573
llvm::SmallVectorImpl::pop_back_val
T pop_back_val()
Definition:SmallVector.h:673
llvm::SmallVectorImpl::emplace_back
reference emplace_back(ArgTypes &&... Args)
Definition:SmallVector.h:937
llvm::SmallVectorImpl::reserve
void reserve(size_type N)
Definition:SmallVector.h:663
llvm::SmallVectorImpl::append
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition:SmallVector.h:683
llvm::SmallVectorImpl::clear
void clear()
Definition:SmallVector.h:610
llvm::SmallVectorTemplateBase::push_back
void push_back(const T &Elt)
Definition:SmallVector.h:413
llvm::SmallVectorTemplateCommon::end
iterator end()
Definition:SmallVector.h:269
llvm::SmallVectorTemplateCommon::begin
iterator begin()
Definition:SmallVector.h:267
llvm::SmallVectorTemplateCommon::back
reference back()
Definition:SmallVector.h:308
llvm::SmallVector
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition:SmallVector.h:1196
llvm::StoreInst
An instruction for storing to memory.
Definition:Instructions.h:292
llvm::TypeSize
Definition:TypeSize.h:334
llvm::Type
The instances of the Type class are immutable: once they are created, they are never changed.
Definition:Type.h:45
llvm::Type::getPointerAddressSpace
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
llvm::Type::getContext
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition:Type.h:128
llvm::Type::getInt64Ty
static IntegerType * getInt64Ty(LLVMContext &C)
llvm::Type::isVoidTy
bool isVoidTy() const
Return true if this is 'void'.
Definition:Type.h:139
llvm::User
Definition:User.h:44
llvm::User::setOperand
void setOperand(unsigned i, Value *Val)
Definition:User.h:233
llvm::User::getOperand
Value * getOperand(unsigned i) const
Definition:User.h:228
llvm::VAArgInst
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
Definition:Instructions.h:1741
llvm::ValueMapIterator
Definition:ValueMap.h:325
llvm::ValueMap
See the file comment.
Definition:ValueMap.h:84
llvm::ValueMap::lookup
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition:ValueMap.h:164
llvm::ValueMap::count
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition:ValueMap.h:151
llvm::ValueMap::begin
iterator begin()
Definition:ValueMap.h:134
llvm::ValueMap::end
iterator end()
Definition:ValueMap.h:135
llvm::Value
LLVM Value Representation.
Definition:Value.h:74
llvm::Value::getType
Type * getType() const
All values are typed, get the type of this value.
Definition:Value.h:255
llvm::Value::replaceAllUsesWith
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition:Value.cpp:534
llvm::Value::users
iterator_range< user_iterator > users()
Definition:Value.h:421
llvm::Value::use_empty
bool use_empty() const
Definition:Value.h:344
llvm::Value::getContext
LLVMContext & getContext() const
All values hold a context through their type.
Definition:Value.cpp:1075
llvm::Value::getName
StringRef getName() const
Return a constant reference to the value's name.
Definition:Value.cpp:309
llvm::Value::takeName
void takeName(Value *V)
Transfer the name from V to this value.
Definition:Value.cpp:383
llvm::cl::opt
Definition:CommandLine.h:1423
llvm::detail::DenseSetImpl::insert
std::pair< iterator, bool > insert(const ValueT &V)
Definition:DenseSet.h:213
llvm::details::FixedOrScalableQuantity::getFixedValue
constexpr ScalarTy getFixedValue() const
Definition:TypeSize.h:202
llvm::details::FixedOrScalableQuantity::isScalable
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition:TypeSize.h:171
llvm::ilist_detail::node_parent_access::getParent
const ParentTy * getParent() const
Definition:ilist_node.h:32
llvm::ilist_node_impl::getIterator
self_iterator getIterator()
Definition:ilist_node.h:132
llvm::memprof::CallStackTrie
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Definition:MemoryProfileInfo.h:53
llvm::memprof::CallStack
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
Definition:MemoryProfileInfo.h:147
uint32_t
uint64_t
iterator_range.h
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
DebugInfo.h
ErrorHandling.h
llvm_unreachable
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition:ErrorHandling.h:143
llvm::AMDGPU::HSAMD::ValueKind::Queue
@ Queue
llvm::ARM::ProfileKind::M
@ M
llvm::AttributeFuncs::typeIncompatible
AttributeMask typeIncompatible(Type *Ty, AttributeSet AS, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
Definition:Attributes.cpp:2349
llvm::AttributeFuncs::mergeAttributesForInlining
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
Definition:Attributes.cpp:2655
llvm::Intrinsic::getOrInsertDeclaration
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
Definition:Intrinsics.cpp:732
llvm::PatternMatch::match
bool match(Val *V, const Pattern &P)
Definition:PatternMatch.h:49
llvm::PatternMatch::m_ImmConstant
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition:PatternMatch.h:864
llvm::at::getAssignmentMarkers
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use ID as an operand.
Definition:DebugInfo.cpp:1867
llvm::at::trackAssignments
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
Definition:DebugInfo.cpp:2113
llvm::at::remapAssignID
void remapAssignID(DenseMap< DIAssignID *, DIAssignID * > &Map, Instruction &I)
Replace DIAssignID uses and attachments with IDs from Map.
Definition:DebugInfo.cpp:1983
llvm::at::getDVRAssignmentMarkers
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
Definition:DebugInfo.h:240
llvm::cl::Hidden
@ Hidden
Definition:CommandLine.h:137
llvm::cl::init
initializer< Ty > init(const Ty &Val)
Definition:CommandLine.h:443
llvm::memprof
Definition:MemoryProfileInfo.h:21
llvm::memprof::getMIBStackNode
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
Definition:MemoryProfileInfo.cpp:88
llvm::numbers::phi
constexpr double phi
Definition:MathExtras.h:61
llvm::objcarc::getAttachedARCFunctionKind
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
Definition:ObjCARCUtil.h:60
llvm::objcarc::ARCInstKind
ARCInstKind
Equivalence classes of instructions in the ARC Model.
Definition:ObjCARCInstKind.h:28
llvm::objcarc::getAttachedARCFunction
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
Definition:ObjCARCUtil.h:43
llvm::objcarc::isRetainOrClaimRV
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
Definition:ObjCARCUtil.h:52
llvm::objcarc::GetRCIdentityRoot
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
Definition:ObjCARCAnalysisUtils.h:110
llvm::objcarc::hasAttachedCallOpBundle
bool hasAttachedCallOpBundle(const CallBase *CB)
Definition:ObjCARCUtil.h:29
llvm::sampleprof::Base
@ Base
Definition:Discriminator.h:58
llvm
This is an optimization pass for GlobalISel generic memory operations.
Definition:AddressRanges.h:18
llvm::for_each
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1732
llvm::all_of
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition:STLExtras.h:1739
llvm::size
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition:STLExtras.h:1697
llvm::changeToInvokeAndSplitBasicBlock
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
Definition:Local.cpp:2995
llvm::successors
auto successors(const MachineBasicBlock *BB)
Definition:MachineBasicBlock.h:1376
llvm::make_range
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Definition:iterator_range.h:77
llvm::PointerMayBeCapturedBefore
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
Definition:CaptureTracking.cpp:237
llvm::append_range
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition:STLExtras.h:2115
llvm::getUnderlyingObject
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Definition:ValueTracking.cpp:6768
llvm::make_early_inc_range
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition:STLExtras.h:657
llvm::isScopedEHPersonality
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Definition:EHPersonalities.h:80
llvm::simplifyInstruction
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Definition:InstructionSimplify.cpp:7234
llvm::EHPersonality
EHPersonality
Definition:EHPersonalities.h:21
llvm::getKnownAlignment
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Definition:Local.h:242
llvm::getOrEnforceKnownAlignment
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
Definition:Local.cpp:1581
llvm::CloneAndPruneFunctionInto
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
Definition:CloneFunction.cpp:1005
llvm::classifyEHPersonality
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Definition:EHPersonalities.cpp:23
llvm::changeToUnreachable
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
Definition:Local.cpp:2909
llvm::errs
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
Definition:raw_ostream.cpp:907
llvm::salvageKnowledge
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and, if the resulting llvm.assume is valid, inserts it before I.
Definition:AssumeBundleBuilder.cpp:293
llvm::updateProfileCallee
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
Definition:InlineFunction.cpp:2054
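updateProfileCallee (above) is the bookkeeping that keeps profile data consistent once a call site disappears: the out-of-line callee body now executes fewer times. A hedged sketch of the caller-side arithmetic, with CallSiteCount as an illustrative name:

// If the inlined call site executed CallSiteCount times, the callee loses
// that many entries; counters cloned into the caller are rescaled via VMap.
int64_t EntryDelta = -static_cast<int64_t>(CallSiteCount);
updateProfileCallee(Callee, EntryDelta, &VMap);
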
llvm::isAssignmentTrackingEnabled
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
Definition:DebugInfo.cpp:2299
llvm::uniteAccessGroups
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
Definition:VectorUtils.cpp:874
llvm::InlineFunction
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
Definition:InlineFunction.cpp:2460
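This is the file's main entry point; a minimal caller-side sketch, assuming CB is a call to a defined function:

InlineFunctionInfo IFI;
InlineResult Res = InlineFunction(CB, IFI, /*MergeAttributes=*/true);
if (!Res.isSuccess())
  errs() << "not inlined: " << Res.getFailureReason() << "\n";
else
  // IFI.InlinedCallSites now lists newly exposed call sites, which are
  // candidates for further inlining.
  for (CallBase *NewCB : IFI.InlinedCallSites)
    (void)NewCB;
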
llvm::isAsynchronousEHPersonality
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
Definition:EHPersonalities.h:50
llvm::isGuaranteedToTransferExecutionToSuccessor
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
Definition:ValueTracking.cpp:7920
llvm::isEscapeSource
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
Definition:AliasAnalysis.cpp:837
llvm::count_if
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
Definition:STLExtras.h:1945
llvm::erase_if
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
Definition:STLExtras.h:2099
llvm::getUnderlyingObjects
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Definition:ValueTracking.cpp:6815
llvm::pred_empty
bool pred_empty(const BasicBlock *BB)
Definition:CFG.h:118
llvm::updateLoopMetadataDebugLocations
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
Definition:DebugInfo.cpp:439
llvm::isIdentifiedObject
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
Definition:AliasAnalysis.cpp:813
llvm::scaleProfData
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scale the profile data attached to 'I' using the ratio of S/T.
Definition:ProfDataUtils.cpp:242
std::swap
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition:BitVector.h:860
llvm::Align
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition:Alignment.h:39
llvm::ClonedCodeInfo
This struct can be used to capture information about code being cloned, while it is being cloned.
Definition:Cloning.h:63
llvm::ClonedCodeInfo::ContainsDynamicAllocas
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
Definition:Cloning.h:74
llvm::ClonedCodeInfo::isSimplified
bool isSimplified(const Value *From, const Value *To) const
Definition:Cloning.h:88
llvm::ClonedCodeInfo::ContainsCalls
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
Definition:Cloning.h:65
llvm::ClonedCodeInfo::ContainsMemProfMetadata
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
Definition:Cloning.h:69
llvm::ClonedCodeInfo::OperandBundleCallSites
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
Definition:Cloning.h:79
llvm::MaybeAlign
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition:Alignment.h:117
llvm::MaybeAlign::valueOrOne
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition:Alignment.h:141
llvm::PGOIndirectCallVisitor::tryGetVTableInstruction
static Instruction * tryGetVTableInstruction(CallBase *CB)
Definition:IndirectCallVisitor.h:38
llvm::at::VarRecord
Helper struct for trackAssignments, below.
Definition:DebugInfo.h:283
llvm::cl::desc
Definition:CommandLine.h:409
