1//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 7//===----------------------------------------------------------------------===// 8// This pass builds the coroutine frame and outlines resume and destroy parts 9// of the coroutine into separate functions. 11// We present a coroutine to an LLVM as an ordinary function with suspension 12// points marked up with intrinsics. We let the optimizer party on the coroutine 13// as a single function for as long as possible. Shortly before the coroutine is 14// eligible to be inlined into its callers, we split up the coroutine into parts 15// corresponding to an initial, resume and destroy invocations of the coroutine, 16// add them to the current SCC and restart the IPO pipeline to optimize the 17// coroutine subfunctions we extracted before proceeding to the caller of the 19//===----------------------------------------------------------------------===// 74#include <initializer_list> 79#define DEBUG_TYPE "coro-split" 82/// Collect (a known) subset of global debug info metadata potentially used by 85/// This metadata set can be used to avoid cloning debug info not owned by \p F 86/// and is shared among all potential clones \p F. 92F, CloneFunctionChangeType::LocalChangesOnly, DIFinder);
95 DIFinder, SPClonedWithinModule);
97}
// end anonymous namespace 100// Lower the intrinisc in CoroEarly phase if coroutine frame doesn't escape 101// and it is known that other transformations, for example, sanitizers 102// won't lead to incorrect code. 112// await_suspend has only 2 parameters, awaiter and handle. 113// Copy parameter attributes from the intrinsic call, but remove the last, 114// because the last parameter now becomes the function that is being called. 118if (
auto Invoke = dyn_cast<InvokeInst>(CB)) {
121 Invoke->getUnwindDest(), {Awaiter, FramePtr});
124 std::copy(Invoke->bundle_op_info_begin(), Invoke->bundle_op_info_end(),
125 WrapperInvoke->bundle_op_info_begin());
126 WrapperInvoke->setAttributes(NewAttributes);
127 WrapperInvoke->setDebugLoc(Invoke->getDebugLoc());
128 NewCall = WrapperInvoke;
129 }
elseif (
auto Call = dyn_cast<CallInst>(CB)) {
133 WrapperCall->setDebugLoc(Call->getDebugLoc());
134 NewCall = WrapperCall;
140 Intrinsic::coro_await_suspend_handle) {
141// Follow the lowered await_suspend call above with a lowered resume call 142// to the returned coroutine. 143if (
auto *Invoke = dyn_cast<InvokeInst>(CB)) {
144// If the await_suspend call is an invoke, we continue in the next block. 145 Builder.
SetInsertPoint(Invoke->getNormalDest()->getFirstInsertionPt());
155auto *ResumeCall = Builder.
CreateCall(ResumeTy, ResumeAddr, {NewCall});
158// We can't insert the 'ret' instruction and adjust the cc until the 159// function has been split, so remember this for later. 162 NewCall = ResumeCall;
178assert(Shape.
ABI == coro::ABI::Retcon || Shape.
ABI == coro::ABI::RetconOnce);
185/// Replace an llvm.coro.end.async. 186/// Will inline the must tail call function call if there is one. 187/// \returns true if cleanup of the coro.end block is needed, false otherwise. 191auto *EndAsync = dyn_cast<CoroAsyncEndInst>(
End);
194returntrue/*needs cleanup of coro.end block*/;
197auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
198if (!MustTailCallFunc) {
200returntrue/*needs cleanup of coro.end block*/;
203// Move the must tail call from the predecessor block into the end block. 204auto *CoroEndBlock =
End->getParent();
205auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
206assert(MustTailCallFuncBlock &&
"Must have a single predecessor block");
207auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
208auto *MustTailCall = cast<CallInst>(&*std::prev(It));
209 CoroEndBlock->splice(
End->getIterator(), MustTailCallFuncBlock,
210 MustTailCall->getIterator());
212// Insert the return instruction. 217// Remove the rest of the block, by splitting it into an unreachable block. 218auto *BB =
End->getParent();
219 BB->splitBasicBlock(
End);
220 BB->getTerminator()->eraseFromParent();
223assert(InlineRes.isSuccess() &&
"Expected inlining to succeed");
226// We have cleaned up the coro.end block above. 230/// Replace a non-unwind call to llvm.coro.end. 234// Start inserting right before the coro.end. 237// Create the return instruction. 239// The cloned functions in switch-lowering always return void. 240case coro::ABI::Switch:
241assert(!cast<CoroEndInst>(
End)->hasResults() &&
242"switch coroutine should not return any values");
243// coro.end doesn't immediately end the coroutine in the main function 244// in this lowering, because we need to deallocate the coroutine. 250// In async lowering this returns. 251case coro::ABI::Async: {
253if (!CoroEndBlockNeedsCleanup)
258// In unique continuation lowering, the continuations always return void. 259// But we may have implicitly allocated storage. 260case coro::ABI::RetconOnce: {
262auto *CoroEnd = cast<CoroEndInst>(
End);
265if (!CoroEnd->hasResults()) {
271auto *CoroResults = CoroEnd->getResults();
272unsigned NumReturns = CoroResults->numReturns();
274if (
auto *RetStructTy = dyn_cast<StructType>(
RetTy)) {
275assert(RetStructTy->getNumElements() == NumReturns &&
276"numbers of returns should match resume function singature");
279for (
Value *RetValEl : CoroResults->return_values())
282 }
elseif (NumReturns == 0) {
287 Builder.
CreateRet(*CoroResults->retval_begin());
291 CoroResults->eraseFromParent();
295// In non-unique continuation lowering, we signal completion by returning 296// a null continuation. 297case coro::ABI::Retcon: {
298assert(!cast<CoroEndInst>(
End)->hasResults() &&
299"retcon coroutine should not return any values");
302auto RetStructTy = dyn_cast<StructType>(
RetTy);
304 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) :
RetTy);
316// Remove the rest of the block, by splitting it into an unreachable block. 317auto *BB =
End->getParent();
318 BB->splitBasicBlock(
End);
319 BB->getTerminator()->eraseFromParent();
322// Mark a coroutine as done, which implies that the coroutine is finished and 325// In resume-switched ABI, the done state is represented by storing zero in 328// NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the 329// pointer to the frame in splitted function is not stored in `Shape`. 333 Shape.
ABI == coro::ABI::Switch &&
334"markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
342// If the coroutine don't have unwind coro end, we could omit the store to 343// the final suspend point since we could infer the coroutine is suspended 344// at the final suspend point by the nullness of ResumeFnAddr. 345// However, we can't skip it if the coroutine have unwind coro end. Since 346// the coroutine reaches unwind coro end is considered suspended at the 347// final suspend point (the ResumeFnAddr is null) but in fact the coroutine 348// didn't complete yet. We need the IndexVal for the final suspend point 349// to make the states clear. 353"The final suspend should only live in the last position of " 363/// Replace an unwind call to llvm.coro.end. 370// In switch-lowering, this does nothing in the main function. 371case coro::ABI::Switch: {
372// In C++'s specification, the coroutine should be marked as done 373// if promise.unhandled_exception() throws. The frontend will 374// call coro.end(true) along this path. 376// FIXME: We should refactor this once there is other language 377// which uses Switch-Resumed style other than C++. 383// In async lowering this does nothing. 384case coro::ABI::Async:
386// In continuation-lowering, this frees the continuation storage. 387case coro::ABI::Retcon:
388case coro::ABI::RetconOnce:
393// If coro.end has an associated bundle, add cleanupret instruction. 395auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
397End->getParent()->splitBasicBlock(
End);
398 CleanupRet->getParent()->getTerminator()->eraseFromParent();
409auto &Context =
End->getContext();
412End->eraseFromParent();
415// In the resume function, we remove the last case (when coro::Shape is built, 416// the final suspend point (if present) is always the last element of 417// CoroSuspends array) since it is an undefined behavior to resume a coroutine 418// suspended at the final suspend point. 419// In the destroy function, if it isn't possible that the ResumeFnAddr is NULL 420// and the coroutine doesn't suspend at the final suspend point actually (this 421// is possible since the coroutine is considered suspended at the final suspend 422// point if promise.unhandled_exception() exits via an exception), we can 423// remove the last case. 432auto FinalCaseIt = std::prev(
Switch->case_end());
433BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
434Switch->removeCase(FinalCaseIt);
441// When the coroutine can only be destroyed when complete, we don't need 442// to generate code for other cases. 459auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
460auto *StructTy = cast<StructType>(AsyncSuspend->getType());
461auto &Context = Suspend->
getParent()->getParent()->getContext();
479 M->getFunctionList().insert(InsertBefore, NewF);
484/// Replace uses of the active llvm.coro.suspend.retcon/async call with the 485/// arguments to the continuation function. 487/// This assumes that the builder has a meaningful insertion point. 492auto NewS = VMap[ActiveSuspend];
493if (NewS->use_empty())
496// Copy out all the continuation arguments after the buffer pointer into 497// an easily-indexed data structure for convenience. 499// The async ABI includes all arguments -- including the first argument. 501for (
autoI = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
506// If the suspend returns a single scalar value, we can just do a simple 508if (!isa<StructType>(NewS->getType())) {
510 NewS->replaceAllUsesWith(Args.front());
514// Try to peephole extracts of an aggregate return. 516auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
517if (!EVI || EVI->getNumIndices() != 1)
520 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
521 EVI->eraseFromParent();
524// If we have no remaining uses, we're done. 525if (NewS->use_empty())
528// Otherwise, we need to create an aggregate. 531 Aggr = Builder.CreateInsertValue(Aggr, Arg,
Idx);
540// In switch lowering, replace coro.suspend with the appropriate value 541// for the type of function we're extracting. 542// Replacing coro.suspend with (0) will result in control flow proceeding to 543// a resume label associated with a suspend point, replacing it with (1) will 544// result in control flow proceeding to a cleanup label associated with this 547 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
550// In async lowering there are no uses of the result. 554// In returned-continuation lowering, the arguments from earlier 555// continuations are theoretically arbitrary, and they should have been 563// The active suspend was handled earlier. 564if (CS == ActiveSuspend)
567auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
568 MappedCS->replaceAllUsesWith(SuspendResult);
569 MappedCS->eraseFromParent();
575// We use a null call graph because there's no call graph node for 576// the cloned function yet. We'll just be rebuilding that later. 577auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
586Value *CachedSlot =
nullptr;
587auto getSwiftErrorSlot = [&](
Type *ValueTy) ->
Value * {
591// Check if the function has a swifterror argument. 592for (
auto &Arg :
F.args()) {
593if (Arg.isSwiftError()) {
599// Create a swifterror alloca. 601F.getEntryBlock().getFirstNonPHIOrDbg());
610auto MappedOp = VMap ? cast<CallInst>((*VMap)[
Op]) :
Op;
613// If there are no arguments, this is a 'get' operation. 615if (
Op->arg_empty()) {
616auto ValueTy =
Op->getType();
617auto Slot = getSwiftErrorSlot(ValueTy);
618 MappedResult = Builder.
CreateLoad(ValueTy, Slot);
621autoValue = MappedOp->getArgOperand(0);
623auto Slot = getSwiftErrorSlot(ValueTy);
628 MappedOp->replaceAllUsesWith(MappedResult);
629 MappedOp->eraseFromParent();
632// If we're updating the original function, we've invalidated SwiftErrorOps. 638/// Returns all DbgVariableIntrinsic in F. 639static std::pair<SmallVector<DbgVariableIntrinsic *, 8>,
647if (
auto *DVI = dyn_cast<DbgVariableIntrinsic>(&
I))
650return {Intrinsics, DbgVariableRecords};
661// Only 64-bit ABIs have a register we can refer to with the entry value. 669// Remove all salvaged dbg.declare intrinsics that became 670// either unreachable or stale due to the CoroSplit transformation. 672auto IsUnreachableBlock = [&](
BasicBlock *BB) {
676auto RemoveOne = [&](
auto *DVI) {
677if (IsUnreachableBlock(DVI->getParent()))
678 DVI->eraseFromParent();
679elseif (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
680// Count all non-debuginfo uses in reachable blocks. 682for (
auto *
User : DVI->getVariableLocationOp(0)->
users())
683if (
auto *
I = dyn_cast<Instruction>(
User))
684if (!isa<AllocaInst>(
I) && !IsUnreachableBlock(
I->getParent()))
687 DVI->eraseFromParent();
691for_each(DbgVariableRecords, RemoveOne);
695// In the original function, the AllocaSpillBlock is a block immediately 696// following the allocation of the frame object which defines GEPs for 697// all the allocas that have been moved into the frame, and it ends by 698// branching to the original beginning of the coroutine. Make this 699// the entry block of the cloned function. 701auto *OldEntry = &NewF->getEntryBlock();
702 Entry->setName(
"entry" + Suffix);
703 Entry->moveBefore(OldEntry);
704 Entry->getTerminator()->eraseFromParent();
706// Clear all predecessors of the new entry block. There should be 707// exactly one predecessor, which we created when splitting out 708// AllocaSpillBlock to begin with. 709assert(Entry->hasOneUse());
710auto BranchToEntry = cast<BranchInst>(Entry->user_back());
711assert(BranchToEntry->isUnconditional());
712 Builder.SetInsertPoint(BranchToEntry);
713 Builder.CreateUnreachable();
714 BranchToEntry->eraseFromParent();
716// Branch from the entry to the appropriate place. 717 Builder.SetInsertPoint(Entry);
720// In switch-lowering, we built a resume-entry block in the original 721// function. Make the entry block branch to this. 724 Builder.CreateBr(SwitchBB);
730// In continuation ABIs, we want to branch to immediately after the 731// active suspend point. Earlier phases will have put the suspend in its 732// own basic block, so just thread our jump directly to its successor. 734 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
737 isa<CoroSuspendRetconInst>(ActiveSuspend)));
738auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
739auto Branch = cast<BranchInst>(MappedCS->getNextNode());
740assert(Branch->isUnconditional());
741 Builder.CreateBr(Branch->getSuccessor(0));
746// Any static alloca that's still being used but not reachable from the new 747// entry needs to be moved to the new entry. 751auto *Alloca = dyn_cast<AllocaInst>(&
I);
752if (!Alloca ||
I.use_empty())
755 !isa<ConstantInt>(Alloca->getArraySize()))
757I.moveBefore(*Entry, Entry->getFirstInsertionPt());
761/// Derive the value of the new frame pointer. 763// Builder should be inserting to the front of the new entry block. 766// In switch-lowering, the argument is the frame pointer. 768return &*NewF->arg_begin();
769// In async-lowering, one of the arguments is an async context as determined 770// by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of 771// the resume function from the async context projection function associated 772// with the active suspend. The frame is located as a tail to the async 775auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
776auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
777auto *CalleeContext = NewF->getArg(ContextIdx);
778auto *ProjectionFunc =
779 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
781 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
783auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
784 ProjectionFunc, CalleeContext);
785 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
786 CallerContext->setDebugLoc(DbgLoc);
787// The frame is located after the async_context header. 788auto &Context = Builder.getContext();
789auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
792// Inline the projection function. 795assert(InlineRes.isSuccess());
799// In continuation-lowering, the argument is the opaque storage. 802Argument *NewStorage = &*NewF->arg_begin();
805// If the storage is inline, just bitcast to the storage to the frame type. 809// Otherwise, load the real frame from the opaque storage. 810return Builder.CreateLoad(FramePtrTy, NewStorage);
816/// Adjust the scope line of the funclet to the first line number after the 817/// suspend point. This avoids a jump in the line table from the function 818/// declaration (where prologue instructions are attributed to) to the suspend 820/// Only adjust the scope line when the files are the same. 821/// If no candidate line number is found, fallback to the line of ActiveSuspend. 827// No subsequent instruction -> fallback to the location of ActiveSuspend. 831 SPToUpdate.setScopeLine(
DL->getLine());
837// Corosplit splits the BB around ActiveSuspend, so the meaningful 838// instructions are not in the same BB. 839if (
auto *Branch = dyn_cast_or_null<BranchInst>(
Successor);
840 Branch && Branch->isUnconditional())
841Successor = Branch->getSuccessor(0)->getFirstNonPHIOrDbg();
843// Find the first successor of ActiveSuspend with a non-zero line location. 844// If that matches the file of ActiveSuspend, use it. 849if (!
DL ||
DL.getLine() == 0)
852if (SPToUpdate.
getFile() ==
DL->getFile()) {
853 SPToUpdate.setScopeLine(
DL.getLine());
860// If the search above failed, fallback to the location of ActiveSuspend. 863 SPToUpdate.setScopeLine(
DL->getLine());
868Align Alignment,
bool NoAlias) {
878 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
882unsigned ParamIndex) {
885 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
889unsigned ParamIndex) {
892 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
895/// Clone the body of the original function into a resume function of 900// Replace all args with dummy instructions. If an argument is the old frame 901// pointer, the dummy will be replaced by the new frame pointer once it is 902// computed below. Uses of all other arguments should have already been 903// rewritten by buildCoroutineFrame() to use loads/stores on the coroutine 908 VMap[&
A] = DummyArgs.
back();
913// Ignore attempts to change certain attributes of the function. 914// TODO: maybe there should be a way to suppress this during cloning? 915auto savedVisibility = NewF->getVisibility();
916auto savedUnnamedAddr = NewF->getUnnamedAddr();
917auto savedDLLStorageClass = NewF->getDLLStorageClass();
919// NewF's linkage (which CloneFunctionInto does *not* change) might not 920// be compatible with the visibility of OrigF (which it *does* change), 921// so protect against that. 922auto savedLinkage = NewF->getLinkage();
929nullptr,
nullptr, &CommonDebugInfo);
931auto &Context = NewF->getContext();
934assert(SP != OrigF.getSubprogram() && SP->isDistinct());
937// Update the linkage name to reflect the modified symbol name. It 938// is necessary to update the linkage name in Swift, since the 939// mangling changes for resume functions. It might also be the 940// right thing to do in C++, but due to a limitation in LLVM's 941// AsmPrinter we can only do this if the function doesn't have an 942// abstract specification, since the DWARF backend expects the 943// abstract specification to contain the linkage name and asserts 944// that they are identical. 946 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift) {
947 SP->replaceLinkageName(
MDString::get(Context, NewF->getName()));
948if (
auto *Decl = SP->getDeclaration()) {
950 Decl->getContext(), Decl->getScope(), Decl->getName(),
951 NewF->getName(), Decl->getFile(), Decl->getLine(), Decl->getType(),
952 Decl->getScopeLine(), Decl->getContainingType(),
953 Decl->getVirtualIndex(), Decl->getThisAdjustment(),
954 Decl->getFlags(), Decl->getSPFlags(), Decl->getUnit(),
955 Decl->getTemplateParams(),
nullptr, Decl->getRetainedNodes(),
956 Decl->getThrownTypes(), Decl->getAnnotations(),
957 Decl->getTargetFuncName());
958 SP->replaceDeclaration(NewDecl);
963 NewF->setLinkage(savedLinkage);
964 NewF->setVisibility(savedVisibility);
965 NewF->setUnnamedAddr(savedUnnamedAddr);
966 NewF->setDLLStorageClass(savedDLLStorageClass);
967// The function sanitizer metadata needs to match the signature of the 968// function it is being attached to. However this does not hold for split 969// functions here. Thus remove the metadata for split functions. 971 NewF->hasMetadata(LLVMContext::MD_func_sanitize))
972 NewF->eraseMetadata(LLVMContext::MD_func_sanitize);
974// Replace the attributes of the new function: 975auto OrigAttrs = NewF->getAttributes();
980// Bootstrap attributes by copying function attributes from the 981// original function. This should include optimization settings and so on. 982 NewAttrs = NewAttrs.addFnAttributes(
983 Context,
AttrBuilder(Context, OrigAttrs.getFnAttrs()));
989auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
991 Attribute::SwiftAsync)) {
993 ActiveAsyncSuspend->getStorageArgumentIndex();
994auto ContextArgIndex = ArgAttributeIndices & 0xff;
997// `swiftasync` must preceed `swiftself` so 0 is not a valid index for 999auto SwiftSelfIndex = ArgAttributeIndices >> 8;
1004// Transfer the original function's attributes. 1005auto FnAttrs = OrigF.getAttributes().getFnAttrs();
1006 NewAttrs = NewAttrs.addFnAttributes(Context,
AttrBuilder(Context, FnAttrs));
1011// If we have a continuation prototype, just use its attributes, 1015 /// FIXME: Is it really good to add the NoAlias attribute? 1025// In these ABIs, the cloned functions always return 'void', and the 1026// existing return sites are meaningless. Note that for unique 1027// continuations, this includes the returns associated with suspends; 1028// this is fine because we can't suspend twice. 1031// Remove old returns. 1036// With multi-suspend continuations, we'll already have eliminated the 1037// original returns and inserted returns before all the suspend points, 1038// so we want to leave any returns in place. 1041// Async lowering will insert musttail call functions at all suspend points 1042// followed by a return. 1043// Don't change returns to unreachable because that will trip up the verifier. 1044// These returns should be unreachable from the clone. 1049 NewF->setAttributes(NewAttrs);
1052// Set up the new entry block. 1053 replaceEntryBlock();
1055// Turn symmetric transfers into musttail calls. 1057 ResumeCall = cast<CallInst>(VMap[ResumeCall]);
1059// FIXME: Could we support symmetric transfer effectively without 1064// Put a 'ret void' after the call, and split any remaining instructions to 1065// an unreachable block. 1069 Builder.CreateRetVoid();
1073 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1074 NewFramePtr = deriveNewFramePointer();
1076// Remap frame pointer. 1078 NewFramePtr->
takeName(OldFramePtr);
1081// Remap vFrame pointer. 1082auto *NewVFrame = Builder.CreateBitCast(
1085if (OldVFrame != NewVFrame)
1088// All uses of the arguments should have been resolved by this point, 1089// so we can safely remove the dummy values. 1092 DummyArg->deleteValue();
1097// Rewrite final suspend handling as it is not done via switch (allows to 1098// remove final case from the switch, since it is undefined behavior to 1099// resume the coroutine suspended at the final suspend point. 1101 handleFinalSuspend();
1106// Replace uses of the active suspend with the corresponding 1107// continuation-function arguments. 1108assert(ActiveSuspend !=
nullptr &&
1109"no active suspend when lowering a continuation-style coroutine");
1110 replaceRetconOrAsyncSuspendUses();
1115 replaceCoroSuspends();
1117// Handle swifterror. 1120// Remove coro.end intrinsics. 1123// Salvage debug info that points into the coroutine frame. 1128// Create a new function matching the original type 1132// Clone the function 1135// Eliminate coro.free from the clones, replacing it with 'null' in cleanup, 1136// to suppress deallocation code. 1144auto *FuncPtrStruct = cast<ConstantStruct>(
1146auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1147auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1148auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1151 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1157// In the same function all coro.sizes should have the same result type. 1158auto *SizeIntrin = Shape.
CoroSizes.back();
1159Module *M = SizeIntrin->getModule();
1161returnDL.getTypeAllocSize(Shape.
FrameTy);
1177// In the same function all coro.sizes should have the same result type. 1178auto *SizeIntrin = Shape.
CoroSizes.back();
1192// For now, we do a mandatory verification step because we don't 1193// entirely trust this pass. Note that we don't want to add a verifier 1194// pass to FPM below because it will also verify all the global data. 1200// Coroutine has no suspend points. Remove heap allocation for the coroutine 1201// frame if possible. 1213 AllocInst->replaceAllUsesWith(Builder.
getFalse());
1214 AllocInst->eraseFromParent();
1215 CoroBegin->replaceAllUsesWith(Frame);
1217 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1229 CoroBegin->eraseFromParent();
1233// SimplifySuspendPoint needs to check that there is no calls between 1234// coro_save and coro_suspend, since any of the calls may potentially resume 1235// the coroutine and if that is the case we cannot eliminate the suspend point. 1238// Assume that no intrinsic can resume the coroutine. 1239if (isa<IntrinsicInst>(
I))
1242if (isa<CallBase>(
I))
1255// Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr 1256// returns a token consumed by suspend instruction, all blocks in between 1257// will have to eventually hit SaveBB when going backwards from ResDesBB. 1258while (!Worklist.
empty()) {
1262if (!Set.contains(Pred))
1266// SaveBB and ResDesBB are checked separately in hasCallsBetween. 1268 Set.erase(ResDesBB);
1279auto *ResumeOrDestroyBB = ResumeOrDestroy->
getParent();
1283if (SaveBB == ResumeOrDestroyBB)
1286// Any calls from Save to the end of the block? 1290// Any calls from begging of the block up to ResumeOrDestroy? 1292 {ResumeOrDestroyBB->getFirstNonPHIIt(), ResumeOrDestroyIt}))
1295// Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB? 1302// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the 1303// suspend point and replace it with nornal control flow. 1308auto *Pred = Suspend->
getParent()->getSinglePredecessor();
1311 Prev = Pred->getTerminator();
1314CallBase *CB = dyn_cast<CallBase>(Prev);
1320// See if the callsite is for resumption or destruction of the coroutine. 1321auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1325// Does not refer to the current coroutine, we cannot do anything with it. 1326if (SubFn->getFrame() != CoroBegin)
1329// See if the transformation is safe. Specifically, see if there are any 1330// calls in between Save and CallInstr. They can potenitally resume the 1331// coroutine rendering this optimization unsafe. 1336// Replace llvm.coro.suspend with the value that results in resumption over 1337// the resume or cleanup path. 1340 Save->eraseFromParent();
1342// No longer need a call to coro.resume or coro.destroy. 1343if (
auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1347// Grab the CalledValue from CB before erasing the CallInstr. 1351// If no more users remove it. Usually it is a bitcast of SubFn. 1352if (CalledValue != SubFn && CalledValue->user_empty())
1353if (
auto *
I = dyn_cast<Instruction>(CalledValue))
1354I->eraseFromParent();
1356// Now we are good to remove SubFn. 1357if (SubFn->user_empty())
1358 SubFn->eraseFromParent();
1363// Remove suspend points that are simplified. 1365// Currently, the only simplification we do is switch-lowering-specific. 1370size_tI = 0,
N = S.size();
1374size_t ChangedFinalIndex = std::numeric_limits<size_t>::max();
1376auto SI = cast<CoroSuspendInst>(S[
I]);
1377// Leave final.suspend to handleFinalSuspend since it is undefined behavior 1378// to resume a coroutine suspended at the final suspend point. 1385if (cast<CoroSuspendInst>(S[
I])->isFinal()) {
1387 ChangedFinalIndex =
I;
1397// Maintain final.suspend in case final suspend was swapped. 1398// Due to we requrie the final suspend to be the last element of CoroSuspends. 1399if (ChangedFinalIndex <
N) {
1400assert(cast<CoroSuspendInst>(S[ChangedFinalIndex])->isFinal());
1401std::swap(S[ChangedFinalIndex], S.back());
1407structSwitchCoroutineSplitter {
1415// Create a resume clone by cloning the body of the original function, 1416// setting new entry block and replacing coro.suspend an appropriate value 1417// to force resume or cleanup pass for every suspend point. 1418 createResumeEntryBlock(
F, Shape);
1420F,
".resume", Shape, coro::CloneKind::SwitchResume,
TTI,
1423F,
".destroy", Shape, coro::CloneKind::SwitchUnwind,
TTI,
1426F,
".cleanup", Shape, coro::CloneKind::SwitchCleanup,
TTI,
1433// Store addresses resume/destroy/cleanup functions in the coroutine frame. 1434 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1441// Create a constant array referring to resume/destroy/clone functions 1442// pointed by the last argument of @llvm.coro.info, so that CoroElide pass 1443// can determined correct function to call. 1444 setCoroInfo(
F, Shape, Clones);
1447// Create a variant of ramp function that does not perform heap allocation 1448// for a switch ABI coroutine. 1450// The newly split `.noalloc` ramp function has the following differences: 1451// - Has one additional frame pointer parameter in lieu of dynamic 1453// - Suppressed allocations by replacing coro.alloc and coro.free. 1457auto *OrigFnTy =
F.getFunctionType();
1458auto OldParams = OrigFnTy->params();
1461 NewParams.
reserve(OldParams.size() + 1);
1462 NewParams.
append(OldParams.begin(), OldParams.end());
1465auto *NewFnTy = FunctionType::get(OrigFnTy->getReturnType(), NewParams,
1466 OrigFnTy->isVarArg());
1472for (
constauto &
I :
F.args()) {
1475// We just appended the frame pointer as the last argument of the new 1477auto FrameIdx = NoAllocF->
arg_size() - 1;
1480 CloneFunctionChangeType::LocalChangesOnly, Returns);
1484 cast_if_present<CoroBeginInst>(VMap[Shape.
CoroBegin]);
1485auto *NewCoroId = cast<CoroIdInst>(NewCoroBegin->getId());
1488 NewCoroBegin->replaceAllUsesWith(NoAllocF->
getArg(FrameIdx));
1489 NewCoroBegin->eraseFromParent();
1493M->getFunctionList().insert(
M->end(), NoAllocF);
1497// When we elide allocation, we read these attributes to determine the 1498// frame size and alignment. 1506// Reset the original function's coro info, make the new noalloc variant 1507// connected to the original ramp function. 1508 setCoroInfo(
F, Shape, Clones);
1509// After copying, set the linkage to internal linkage. Original function 1510// may have different linkage, but optimization dependent on this function 1511// generally relies on LTO. 1517// Create an entry block for a resume function with a switch that will jump to 1523// %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 1524// 0, i32 2 % index = load i32, i32* %index.addr switch i32 %index, label 1526// i32 0, label %resume.0 1527// i32 1, label %resume.1 1537auto *GepIndex = Builder.CreateStructGEP(
1544size_t SuspendIndex = 0;
1546auto *S = cast<CoroSuspendInst>(AnyS);
1549// Replace CoroSave with a store to Index: 1550// %index.addr = getelementptr %f.frame... (index field number) 1551// store i32 %IndexVal, i32* %index.addr1 1552auto *Save = S->getCoroSave();
1553 Builder.SetInsertPoint(Save);
1555// The coroutine should be marked done if it reaches the final suspend 1559auto *GepIndex = Builder.CreateStructGEP(
1561 Builder.CreateStore(IndexVal, GepIndex);
1565 Save->eraseFromParent();
1567// Split block before and after coro.suspend and add a jump from an entry 1572// %0 = call i8 @llvm.coro.suspend(token none, i1 false) 1573// switch i8 %0, label %suspend[i8 0, label %resume 1574// i8 1, label %cleanup] 1579// br label %resume.0.landing 1581// resume.0: ; <--- jump from the switch in the resume.entry 1582// %0 = tail call i8 @llvm.coro.suspend(token none, i1 false) 1583// br label %resume.0.landing 1586// %1 = phi i8[-1, %whateverBB], [%0, %resume.0] 1587// switch i8 % 1, label %suspend [i8 0, label %resume 1588// i8 1, label %cleanup] 1590auto *SuspendBB = S->getParent();
1592 SuspendBB->splitBasicBlock(S,
"resume." +
Twine(SuspendIndex));
1593auto *LandingBB = ResumeBB->splitBasicBlock(
1594 S->getNextNode(), ResumeBB->getName() +
Twine(
".landing"));
1595Switch->addCase(IndexVal, ResumeBB);
1597 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
1599 PN->insertBefore(LandingBB->begin());
1600 S->replaceAllUsesWith(PN);
1601 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
1602 PN->addIncoming(S, ResumeBB);
1607 Builder.SetInsertPoint(UnreachBB);
1608 Builder.CreateUnreachable();
1613// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame. 1618auto *ResumeAddr = Builder.CreateStructGEP(
1621 Builder.CreateStore(ResumeFn, ResumeAddr);
1623Value *DestroyOrCleanupFn = DestroyFn;
1627// If there is a CoroAlloc and it returns false (meaning we elide the 1628// allocation, use CleanupFn instead of DestroyFn). 1629 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1632auto *DestroyAddr = Builder.CreateStructGEP(
1635 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1638// Create a global constant array containing pointers to functions provided 1639// and set Info parameter of CoroBegin to point at this constant. Example: 1641// @f.resumers = internal constant [2 x void(%f.frame*)*] 1642// [void(%f.frame*)* @f.resume, void(%f.frame*)* 1644// define void @f() { 1646// call i8* @llvm.coro.begin(i8* null, i32 0, i8* null, 1647// i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to 1650// Assumes that all the functions have the same signature. 1653// This only works under the switch-lowering ABI because coro elision 1654// only works on the switch-lowering ABI. 1659auto *ArrTy = ArrayType::get(Part->
getType(),
Args.size());
1662auto *GV =
newGlobalVariable(*M, ConstVal->getType(),
/*isConstant=*/true,
1663 GlobalVariable::PrivateLinkage, ConstVal,
1664F.getName() +
Twine(
".resumers"));
1666// Update coro.begin instruction to refer to this constant. 1678auto &Context = Suspend->
getParent()->getParent()->getContext();
1684 ResumeIntrinsic->eraseFromParent();
1689/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs. 1694for (
auto *paramTy : FnTy->params()) {
1696if (paramTy != FnArgs[ArgIdx]->
getType())
1710// Coerce the arguments, llvm optimizations seem to ignore the types in 1711// vaarg functions and throws away casts in optimized mode. 1715auto *TailCall = Builder.
CreateCall(FnTy, MustTailCallFn, CallArgs);
1716// Skip targets which don't support tail call. 1720 TailCall->setDebugLoc(Loc);
1730// Reset various things that the optimizer might have decided it 1731// "knows" about the coroutine function due to not seeing a return. 1732F.removeFnAttr(Attribute::NoReturn);
1733F.removeRetAttr(Attribute::NoAlias);
1734F.removeRetAttr(Attribute::NonNull);
1736auto &Context =
F.getContext();
1746"async.ctx.frameptr");
1748// Map all uses of llvm.coro.begin to the allocated frame pointer. 1750// Make sure we don't invalidate Shape.FramePtr. 1756// Create all the functions in order after the main function. 1757auto NextF = std::next(
F.getIterator());
1759// Create a continuation function for each of the suspend points. 1762auto *Suspend = cast<CoroSuspendAsyncInst>(CS);
1764// Create the clone declaration. 1765auto ResumeNameSuffix =
".resume.";
1766auto ProjectionFunctionName =
1767 Suspend->getAsyncContextProjectionFunction()->getName();
1768bool UseSwiftMangling =
false;
1769if (ProjectionFunctionName ==
"__swift_async_resume_project_context") {
1770 ResumeNameSuffix =
"TQ";
1771 UseSwiftMangling =
true;
1772 }
elseif (ProjectionFunctionName ==
"__swift_async_resume_get_context") {
1773 ResumeNameSuffix =
"TY";
1774 UseSwiftMangling =
true;
1778 UseSwiftMangling ? ResumeNameSuffix +
Twine(
Idx) +
"_" 1783// Insert a branch to a new return block immediately before the suspend 1785auto *SuspendBB = Suspend->getParent();
1786auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1787auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1789// Place it before the first suspend. 1792 Branch->setSuccessor(0, ReturnBB);
1796// Insert the call to the tail call function and inline it. 1797auto *Fn = Suspend->getMustTailCallFunction();
1807// Replace the lvm.coro.async.resume intrisic call. 1817auto *Clone = Clones[
Idx];
1820 Suspend,
TTI, CommonDebugInfo);
1830// Reset various things that the optimizer might have decided it 1831// "knows" about the coroutine function due to not seeing a return. 1832F.removeFnAttr(Attribute::NoReturn);
1833F.removeRetAttr(Attribute::NoAlias);
1834F.removeRetAttr(Attribute::NonNull);
1836// Allocate the frame. 1840 RawFramePtr = Id->getStorage();
1844// Determine the size of the frame. 1848// Allocate. We don't need to update the call graph node because we're 1849// going to recompute it from scratch after splitting. 1850// FIXME: pass the required alignment 1855// Stash the allocated frame pointer in the continuation storage. 1856 Builder.
CreateStore(RawFramePtr, Id->getStorage());
1859// Map all uses of llvm.coro.begin to the allocated frame pointer. 1861// Make sure we don't invalidate Shape.FramePtr. 1867// Create a unique return block. 1869PHINode *ContinuationPhi =
nullptr;
1872// Create all the functions in order after the main function. 1873auto NextF = std::next(
F.getIterator());
1875// Create a continuation function for each of the suspend points. 1878auto Suspend = cast<CoroSuspendRetconInst>(CS);
1880// Create the clone declaration. 1885// Insert a branch to the unified return block immediately before 1886// the suspend point. 1887auto SuspendBB = Suspend->getParent();
1888auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1889auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1891// Create the unified return block. 1893// Place it before the first suspend. 1900// First, the continuation. 1904// Create PHIs for all other return values. 1907// Next, all the directly-yielded values. 1912// Build the return value. 1913autoRetTy =
F.getReturnType();
1915// Cast the continuation value if necessary. 1916// We can't rely on the types matching up because that type would 1917// have to be infinite. 1918auto CastedContinuationTy =
1920auto *CastedContinuation =
1921 Builder.
CreateBitCast(ContinuationPhi, CastedContinuationTy);
1923Value *RetV = CastedContinuation;
1924if (!ReturnPHIs.
empty()) {
1929for (
auto Phi : ReturnPHIs)
1936// Branch to the return block. 1937 Branch->setSuccessor(0, ReturnBB);
1940for (
auto [Phi, VUse] :
1942 Phi->addIncoming(VUse, SuspendBB);
1951auto Clone = Clones[
Idx];
1954 Suspend,
TTI, CommonDebugInfo);
1965OS <<
"While splitting coroutine ";
1966F.printAsOperand(
OS,
/*print type*/false,
F.getParent());
1972/// Remove calls to llvm.coro.end in the original function. 1980auto &Context =
End->getContext();
1982End->eraseFromParent();
1988for (
auto *U :
F.users()) {
1989if (
auto *CB = dyn_cast<CallBase>(U)) {
1990auto *Caller = CB->getFunction();
1991if (Caller && Caller->isPresplitCoroutine() &&
1992 CB->hasFnAttr(llvm::Attribute::CoroElideSafe))
2002 SwitchCoroutineSplitter::split(
F,
Shape, Clones,
TTI);
2007bool OptimizeFrame) {
2008 PrettyStackTraceFunction prettyStackTrace(
F);
2010auto &Shape =
ABI.Shape;
2018ABI.buildCoroutineFrame(OptimizeFrame);
2023bool shouldCreateNoAllocVariant =
2027// If there are no suspend points, no split required, just remove 2028// the allocation and deallocation blocks, they are not needed. 2029if (isNoSuspendCoroutine) {
2032ABI.splitCoroutine(
F, Shape, Clones,
TTI);
2035// Replace all the swifterror operations in the original function. 2036// This invalidates SwiftErrorOps in the Shape. 2039// Salvage debug intrinsics that point into the coroutine frame in the 2040// original function. The Cloner has already salvaged debug info in the new 2041// coroutine funclets. 2044for (
auto *DDI : DbgInsts)
2051if (shouldCreateNoAllocVariant)
2052 SwitchCoroutineSplitter::createNoAllocVariant(
F, Shape, Clones);
2061auto *CurrentSCC = &
C;
2062if (!Clones.
empty()) {
2065// Each clone in the Switch lowering is independent of the other clones. 2066// Let the LazyCallGraph know about each one separately. 2073// Each clone in the Async/Retcon lowering references of the other clones. 2074// Let the LazyCallGraph know about all of them at once. 2080// Let the CGSCC infra handle the changes to the original function. 2085// Do some cleanup and let the CGSCC infra see if we've cleaned up any edges 2086// to the split functions. 2093/// Replace a call to llvm.coro.prepare.retcon. 2099// Attempt to peephole this pattern: 2100// %0 = bitcast [[TYPE]] @some_function to i8* 2101// %1 = call @llvm.coro.prepare.retcon(i8* %0) 2102// %2 = bitcast %1 to [[TYPE]] 2104// %2 = @some_function 2106// Look for bitcasts back to the original function type. 2107auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2108if (!Cast || Cast->getType() != Fn->getType())
2111// Replace and remove the cast. 2112 Cast->replaceAllUsesWith(Fn);
2113 Cast->eraseFromParent();
2116// Replace any remaining uses with the function as an i8*. 2117// This can never directly be a callee, so we don't need to update CG. 2121// Kill dead bitcasts. 2122while (
auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2123if (!Cast->use_empty())
2125 CastFn = Cast->getOperand(0);
2126 Cast->eraseFromParent();
2134// Intrinsics can only be used in calls. 2135auto *Prepare = cast<CallInst>(
P.getUser());
2146auto *PrepareFn = M.getFunction(
Name);
2147if (PrepareFn && !PrepareFn->use_empty())
2151static std::unique_ptr<coro::BaseABI>
2157if (CustomABI >= GenCustomABIs.
size())
2159return GenCustomABIs[CustomABI](
F, S);
2164return std::make_unique<coro::SwitchABI>(
F, S, IsMatCallback);
2166return std::make_unique<coro::AsyncABI>(
F, S, IsMatCallback);
2168return std::make_unique<coro::AnyRetconABI>(
F, S, IsMatCallback);
2170return std::make_unique<coro::AnyRetconABI>(
F, S, IsMatCallback);
2176 : CreateAndInitABI([](
Function &
F, coro::Shape &S) {
2177 std::unique_ptr<coro::BaseABI> ABI =
2182 OptimizeFrame(OptimizeFrame) {}
2186 : CreateAndInitABI([=](
Function &
F, coro::Shape &S) {
2187 std::unique_ptr<coro::BaseABI> ABI =
2192 OptimizeFrame(OptimizeFrame) {}
2194// For back compatibility, constructor takes a materializable callback and 2195// creates a generator for an ABI with a modified materializable callback. 2198 : CreateAndInitABI([=](
Function &
F, coro::Shape &S) {
2199 std::unique_ptr<coro::BaseABI> ABI =
2204 OptimizeFrame(OptimizeFrame) {}
2206// For back compatibility, constructor takes a materializable callback and 2207// creates a generator for an ABI with a modified materializable callback. 2211 : CreateAndInitABI([=](
Function &
F, coro::Shape &S) {
2212 std::unique_ptr<coro::BaseABI> ABI =
2217 OptimizeFrame(OptimizeFrame) {}
2222// NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a 2223// non-zero number of nodes, so we assume that here and grab the first 2224// node's function's module. 2225Module &M = *
C.begin()->getFunction().getParent();
2229// Check for uses of llvm.coro.prepare.retcon/async. 2234// Find coroutines for processing. 2237if (
N.getFunction().isPresplitCoroutine())
2243auto *CurrentSCC = &
C;
2244// Split all the coroutines. 2250// The suspend-crossing algorithm in buildCoroutineFrame gets tripped up 2251// by unreachable blocks, so remove them as a first pass. Remove the 2252// unreachable blocks before collecting intrinsics into Shape. 2259F.setSplittedCoroutine();
2267 *
N, Shape, Clones, *CurrentSCC, CG, AM, UR,
FAM);
2272 <<
"Split '" <<
ore::NV(
"function",
F.getName())
2278// Run the CGSCC pipeline on the original and newly split functions. 2285for (
auto *PrepareFn : PrepareFns) {
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void print(raw_ostream &Out, object::Archive::Kind Kind, T Val)
Expand Atomic instructions
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file provides interfaces used to manipulate a call graph, regardless if it is a "old style" Call...
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex)
static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy)
static std::pair< SmallVector< DbgVariableIntrinsic *, 8 >, SmallVector< DbgVariableRecord * > > collectDbgVariableIntrinsics(Function &F)
Returns all DbgVariableIntrinsic in F.
static LazyCallGraph::SCC & updateCallGraphAfterCoroutineSplit(LazyCallGraph::Node &N, const coro::Shape &Shape, const SmallVectorImpl< Function * > &Clones, LazyCallGraph::SCC &C, LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape, ValueToValueMapTy *VMap)
static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex)
static void maybeFreeRetconStorage(IRBuilder<> &Builder, const coro::Shape &Shape, Value *FramePtr, CallGraph *CG)
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB)
static Function * createCloneDeclaration(Function &OrigF, coro::Shape &Shape, const Twine &Suffix, Module::iterator InsertBefore, AnyCoroSuspendInst *ActiveSuspend)
Remove calls to llvm coro end in the original static function void removeCoroEndsFromRampFunction(const coro::Shape &Shape)
static FunctionType * getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend)
static void updateScopeLine(Instruction *ActiveSuspend, DISubprogram &SPToUpdate)
Adjust the scope line of the funclet to the first line number after the suspend point.
static void addPrepareFunction(const Module &M, SmallVectorImpl< Function * > &Fns, StringRef Name)
static void simplifySuspendPoints(coro::Shape &Shape)
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context, unsigned ParamIndex, uint64_t Size, Align Alignment, bool NoAlias)
static bool hasSafeElideCaller(Function &F)
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG, LazyCallGraph::SCC &C)
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace a non-unwind call to llvm.coro.end.
static void replaceFrameSizeAndAlignment(coro::Shape &Shape)
static std::unique_ptr< coro::BaseABI > CreateNewABI(Function &F, coro::Shape &S, std::function< bool(Instruction &)> IsMatCallback, const SmallVector< CoroSplitPass::BaseABITy > GenCustomABIs)
static bool replaceCoroEndAsync(AnyCoroEndInst *End)
Replace an llvm.coro.end.async.
static void doSplitCoroutine(Function &F, SmallVectorImpl< Function * > &Clones, coro::BaseABI &ABI, TargetTransformInfo &TTI, bool OptimizeFrame)
static bool hasCallsInBlockBetween(iterator_range< BasicBlock::iterator > R)
Replace a call to llvm coro prepare static retcon void replacePrepare(CallInst *Prepare, LazyCallGraph &CG, LazyCallGraph::SCC &C)
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
Replace an unwind call to llvm.coro.end.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend, CoroBeginInst *CoroBegin)
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape, Value *FramePtr)
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape)
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, Value *FramePtr, bool InResume, CallGraph *CG)
static void lowerAwaitSuspend(IRBuilder<> &Builder, CoroAwaitSuspendInst *CB, coro::Shape &Shape)
static void lowerAwaitSuspends(Function &F, coro::Shape &Shape)
static void handleNoSuspendCoroutine(coro::Shape &Shape)
static void postSplitCleanup(Function &F)
static TypeSize getFrameSizeForShape(coro::Shape &Shape)
Coerce the arguments in p FnArgs according to p FnTy in p static CallArgs void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy, ArrayRef< Value * > FnArgs, SmallVectorImpl< Value * > &CallArgs)
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend, Value *Continuation)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file contains constants used for implementing Dwarf debug support.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
Implements a lazy call graph analysis and related passes for the new pass manager.
FunctionAnalysisManager FAM
This file provides a priority worklist.
const SmallVectorImpl< MachineOperand > & Cond
Remove Loads Into Fake Uses
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file contains some functions that are useful when dealing with strings.
static SymbolRef::Type getType(const Symbol *Sym)
This pass exposes codegen information to IR-level passes.
static const unsigned FramePtr
void setSwiftError(bool V)
Specify whether this alloca is used to represent a swifterror.
void setAlignment(Align Align)
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
CoroAllocInst * getCoroAlloc()
Align getStorageAlignment() const
uint64_t getStorageSize() const
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
size_t size() const
size - Get the array size.
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
AttributeList removeParamAttributes(LLVMContext &C, unsigned ArgNo, const AttributeMask &AttrsToRemove) const
Remove the specified attribute at the specified arg index from this attribute list.
LLVM Basic Block Representation.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the attributes for this call.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the attributes for this call.
The basic data container for the call graph of a Module of IR.
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
static Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * get(StructType *T, ArrayRef< Constant * > V)
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This represents the llvm.coro.align instruction.
This represents the llvm.coro.alloc instruction.
This represents the llvm.coro.await.suspend.{void,bool,handle} instructions.
Value * getAwaiter() const
Function * getWrapperFunction() const
This class represents the llvm.coro.begin or llvm.coro.begin.custom.abi instructions.
AnyCoroIdInst * getId() const
bool hasCustomABI() const
This represents the llvm.coro.id instruction.
void setInfo(Constant *C)
This represents the llvm.coro.size instruction.
This represents the llvm.coro.suspend.async instruction.
CoroAsyncResumeInst * getResumeFunction() const
This represents the llvm.coro.suspend instruction.
CoroSaveInst * getCoroSave() const
DISubprogram * getSubprogram() const
Get the subprogram for this scope.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
Utility to find all debug info in a module.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
This class represents a freeze function that returns random concrete value if an operand is either a ...
A proxy from a FunctionAnalysisManager to an SCC.
Class to represent function types.
Type * getReturnType() const
static FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
void setAttributes(AttributeList Attrs)
Set the attribute list for this Function.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isCoroOnlyDestroyWhenComplete() const
Argument * getArg(unsigned i) const
void setLinkage(LinkageTypes LT)
Module * getParent()
Get the module that this global value is contained inside of...
PointerType * getType() const
Global values are always pointers.
@ InternalLinkage
Rename collisions when linking (static functions).
@ ExternalLinkage
Externally visible function.
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
AllocaInst * CreateAlloca(Type *Ty, unsigned AddrSpace, Value *ArraySize=nullptr, const Twine &Name="")
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
InvokeInst * CreateInvoke(FunctionType *Ty, Value *Callee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > Args, ArrayRef< OperandBundleDef > OpBundles, const Twine &Name="")
Create an invoke instruction.
BasicBlock::iterator GetInsertPoint() const
Value * CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx, const Twine &Name="")
Value * CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, const Twine &Name="")
CleanupReturnInst * CreateCleanupRet(CleanupPadInst *CleanupPad, BasicBlock *UnwindBB=nullptr)
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
BranchInst * CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, MDNode *BranchWeights=nullptr, MDNode *Unpredictable=nullptr)
Create a conditional 'br Cond, TrueDest, FalseDest' instruction.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool' for the isVolatile parameter.
LLVMContext & getContext() const
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
StoreInst * CreateStore(Value *Val, Value *Ptr, bool isVolatile=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
BranchInst * CreateBr(BasicBlock *Dest)
Create an unconditional 'br label X' instruction.
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
This provides a uniform API for creating instructions and inserting them into a basic block: either at the end of a BasicBlock, or at a specific iterator location in a block.
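A short sketch tying the IRBuilder methods above together (the helper name and block arguments are hypothetical, not from CoroSplit.cpp):
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Hypothetical sketch: append a load and a null-check branch to BB.
static void emitNullCheck(BasicBlock *BB, Type *I8Ty, Value *Ptr,
                          BasicBlock *IsNull, BasicBlock *NotNull) {
  IRBuilder<> Builder(BB->getContext());
  Builder.SetInsertPoint(BB);                       // append at the end of BB
  Value *V = Builder.CreateLoad(I8Ty, Ptr, "v");    // load i8 from Ptr
  Value *Cond = Builder.CreateIsNull(V, "is.null"); // v == 0
  Builder.CreateCondBr(Cond, IsNull, NotNull);
}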
This class captures the data input to the InlineFunction call, and records the auxiliary results produced by it.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Instruction * getNextNonDebugInstruction(bool SkipPseudoOp=false) const
Return a pointer to the next non-debug instruction in the same basic block as 'this', or nullptr if no such instruction exists.
This is an important class for using LLVM in a threaded context.
A node in the call graph.
An SCC of the call graph.
A lazily constructed view of the call graph of a module.
void addSplitFunction(Function &OriginalFunction, Function &NewFunction)
Add a new function split/outlined from an existing function.
void addSplitRefRecursiveFunctions(Function &OriginalFunction, ArrayRef< Function * > NewFunctions)
Add new ref-recursive functions split/outlined from an existing function.
Node & get(Function &F)
Get a graph node for a given function, scanning it to populate the graph data as necessary.
SCC * lookupSCC(Node &N) const
Lookup a function's SCC in the graph.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
static MDString * get(LLVMContext &Context, StringRef Str)
A Module instance is used to store all the information related to an LLVM module.
FunctionListType::iterator iterator
The Function iterators.
Diagnostic information for applied optimization remarks.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will have (use 0 if you really have no idea).
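A minimal sketch of building a PHI through the IRBuilder entries above (the helper name and blocks are hypothetical):
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Hypothetical sketch: merge an i1 flag from two predecessor blocks,
// reserving space for both incoming edges.
static PHINode *mergeFlag(IRBuilder<> &Builder, BasicBlock *MergeBB,
                          BasicBlock *TrueBB, BasicBlock *FalseBB) {
  Builder.SetInsertPoint(MergeBB, MergeBB->begin());
  PHINode *Flag =
      Builder.CreatePHI(Builder.getInt1Ty(), /*NumReservedValues=*/2, "flag");
  Flag->addIncoming(Builder.getTrue(), TrueBB);
  Flag->addIncoming(Builder.getFalse(), FalseBB);
  return Flag;
}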
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address space zero).
static PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
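A hypothetical pass skeleton (not the CoroSplitPass itself) illustrating the two factories: report none() when the IR was changed, all() when it was left untouched:
#include "llvm/IR/PassManager.h"
using namespace llvm;
// Hypothetical sketch of the conventional run() return idiom.
struct NoOpSketchPass : PassInfoMixin<NoOpSketchPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    bool Changed = false;
    // ... transform F here, setting Changed accordingly ...
    return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
  }
};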
PrettyStackTraceEntry - This class is used to represent a frame of the "pretty" stack trace that is dumped when a crash occurs.
Return a value (possibly void), from a function.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Analysis pass providing the TargetTransformInfo.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
bool supportsTailCallFor(const CallBase *CB) const
Return true if the target supports a tail call for CB.
The TimeTraceScope is a helper class to call the begin and end functions of the time trace profiler.
Value handle that tracks a Value across RAUW.
ValueTy * getValPtr() const
Triple - Helper class for working with autoconf configuration names.
bool isArch64Bit() const
Test whether the architecture is 64-bit.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
The instances of the Type class are immutable: once they are created, they are never changed.
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt8Ty(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
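A minimal sketch of the usual replace-and-rename idiom these two methods support when a new value supersedes an old one (the helper name is hypothetical):
#include "llvm/IR/Value.h"
using namespace llvm;
// Hypothetical sketch: swap New in for Old everywhere.
static void replaceValue(Value *Old, Value *New) {
  New->takeName(Old);           // keep the old name for readable IR
  Old->replaceAllUsesWith(New); // rewrite every use to point at New
}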
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
static Function * createClone(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, Function *NewF, AnyCoroSuspendInst *ActiveSuspend, TargetTransformInfo &TTI, const MetadataSetTy &CommonDebugInfo)
Create a clone for a continuation lowering.
void replaceSwiftErrorOps()
Value * deriveNewFramePointer()
Derive the value of the new frame pointer.
void replaceCoroSuspends()
void handleFinalSuspend()
bool isSwitchDestroyFunction()
void replaceRetconOrAsyncSuspendUses()
Replace uses of the active llvm.coro.suspend.retcon/async call with the arguments to the continuation function.
virtual void create()
Clone the body of the original function into a resume function of some sort.
void splitCoroutine(Function &F, coro::Shape &Shape, SmallVectorImpl< Function * > &Clones, TargetTransformInfo &TTI) override
void create() override
Clone the body of the original function into a resume function of some sort.
static Function * createClone(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, CloneKind FKind, TargetTransformInfo &TTI, const MetadataSetTy &CommonDebugInfo)
Create a clone for a switch lowering.
const ParentTy * getParent() const
self_iterator getIterator()
A range adaptor for a pair of iterators.
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
void salvageDebugInfo(SmallDenseMap< Argument *, AllocaInst *, 4 > &ArgToAllocaMap, DbgVariableIntrinsic &DVI, bool IsEntryPoint)
Attempts to rewrite the location operand of debug intrinsics in terms of the coroutine frame pointer, folding pointer offsets into the DIExpression.
@ Async
The "async continuation" lowering, where each suspend point creates a single continuation function.
@ RetconOnce
The "unique returned-continuation" lowering, where each suspend point creates a single continuation f...
@ Retcon
The "returned-continuation" lowering, where each suspend point creates a single continuation function...
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
void suppressCoroAllocs(CoroIdInst *CoroId)
Replaces all @llvm.coro.alloc intrinsic calls associated with a given @llvm.coro.id instruction with boolean value false.
void normalizeCoroutine(Function &F, coro::Shape &Shape, TargetTransformInfo &TTI)
CallInst * createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, TargetTransformInfo &TTI, ArrayRef< Value * > Arguments, IRBuilder<> &)
void replaceCoroFree(CoroIdInst *CoroId, bool Elide)
bool isTriviallyMaterializable(Instruction &I)
@ SwitchCleanup
The shared cleanup function for a switch lowering.
@ Continuation
An individual continuation function.
DiagnosticInfoOptimizationBase::Argument NV
void CloneFunctionAttributesInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc's attributes into NewFunc, transforming values based on the mappings in VMap.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
detail::zippy< detail::zip_first, T, U, Args... > zip_equal(T &&t, U &&u, Args &&...args)
zip iterator that assumes that all iteratees have the same length.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...) where A is the zero-based index of the item and B, C, ... are the corresponding values from each range.
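A minimal sketch of both range adaptors over parallel vectors (the helper name is hypothetical):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
// Hypothetical sketch: iterate with an index, then pairwise.
static int walk(const SmallVector<int, 4> &A, const SmallVector<int, 4> &B) {
  int Sum = 0;
  for (auto [Idx, V] : enumerate(A))  // Idx is the zero-based position of V
    Sum += static_cast<int>(Idx) + V;
  for (auto [X, Y] : zip_equal(A, B)) // asserts A and B have equal length
    Sum += X * Y;
  return Sum;
}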
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
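A short sketch of the common debugging idiom built on verifyFunction, which returns true when it finds errors (the helper name and message are hypothetical):
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// Hypothetical sketch: print verifier complaints to errs() and abort.
static void checkFunction(Function &F) {
  if (verifyFunction(F, &errs()))
    report_fatal_error("broken function produced by a transform");
}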
LazyCallGraph::SCC & updateCGAndAnalysisManagerForFunctionPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a function pass.
LazyCallGraph::SCC & updateCGAndAnalysisManagerForCGSCCPass(LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, FunctionAnalysisManager &FAM)
Helper to update the call graph after running a CGSCC pass.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
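A minimal sketch of the adaptor's main use case, erasing instructions while walking a block; the adaptor advances the iterator before the loop body runs, so erasure is safe (the helper name is hypothetical):
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
// Hypothetical sketch: delete debug intrinsics in place.
static void dropDbgIntrinsics(BasicBlock &BB) {
  for (Instruction &I : make_early_inc_range(BB))
    if (isa<DbgInfoIntrinsic>(I))
      I.eraseFromParent();
}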
BasicBlock::iterator skipDebugIntrinsics(BasicBlock::iterator It)
Advance It while it points to a debug instruction and return the result.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
MetadataSetTy FindDebugInfoToIdentityMap(CloneFunctionChangeType Changes, DebugInfoFinder &DIFinder, DISubprogram *SPClonedWithinModule)
Based on Changes and DIFinder return debug info that needs to be identity mapped during Metadata cloning.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the code after it dead.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void CloneFunctionMetadataInto(Function &NewFunc, const Function &OldFunc, ValueToValueMapTy &VMap, RemapFlags RemapFlag, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataSetTy *IdentityMD=nullptr)
Clone OldFunc's metadata into NewFunc.
DWARFExpression::Operation Op
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
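A minimal sketch of inlining one call site and checking the result (the helper name is hypothetical):
#include "llvm/IR/InstrTypes.h"
#include "llvm/Transforms/Utils/Cloning.h"
using namespace llvm;
// Hypothetical sketch: inline CB and report whether it succeeded.
static bool tryInline(CallBase &CB) {
  InlineFunctionInfo IFI;
  InlineResult IR = InlineFunction(CB, IFI);
  return IR.isSuccess();
}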
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, CloneFunctionChangeType Changes, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr)
Clone OldFunc into NewFunc, transforming the old arguments into references to VMap values.
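A sketch of the standard CloneFunctionInto setup, assuming NewF is an empty function with a signature identical to OldF (the helper name is hypothetical):
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;
// Hypothetical sketch: map old arguments to new ones, then clone the body.
static void cloneBody(Function &OldF, Function &NewF) {
  ValueToValueMapTy VMap;
  for (auto [OldArg, NewArg] : zip_equal(OldF.args(), NewF.args()))
    VMap[&OldArg] = &NewArg;
  SmallVector<ReturnInst *, 4> Returns;
  CloneFunctionInto(&NewF, &OldF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);
}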
auto predecessors(const MachineBasicBlock *BB)
DISubprogram * CollectDebugInfoForCloning(const Function &F, CloneFunctionChangeType Changes, DebugInfoFinder &DIFinder)
Collect debug information such as types, compile units, and other subprograms that are reachable from \p F.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Remove all blocks that cannot be reached from the function's entry.
void CloneFunctionBodyInto(Function &NewFunc, const Function &OldFunc, ValueToValueMapTy &VMap, RemapFlags RemapFlag, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr, ValueMapTypeRemapper *TypeMapper=nullptr, ValueMaterializer *Materializer=nullptr, const MetadataSetTy *IdentityMD=nullptr)
Clone OldFunc's body into NewFunc.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in ExclusionSet, returning true if uncertain.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Support structure for SCC passes to communicate updates the call graph back to the CGSCC pass manager...
SmallPriorityWorklist< LazyCallGraph::SCC *, 1 > & CWorklist
Worklist of the SCCs queued for processing.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &CG, CGSCCUpdateResult &UR)
CoroSplitPass(bool OptimizeFrame=false)
BaseABITy CreateAndInitABI
CallInst * makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt)
GlobalVariable * AsyncFuncPointer
bool IsFrameInlineInStorage
Function * ResumePrototype
SwitchInst * ResumeSwitch
BasicBlock * ResumeEntryBlock
SmallVector< CallInst *, 2 > SymmetricTransfers
SmallVector< CoroAwaitSuspendInst *, 4 > CoroAwaitSuspends
AsyncLoweringStorage AsyncLowering
FunctionType * getResumeFunctionType() const
IntegerType * getIndexType() const
AnyCoroIdRetconInst * getRetconCoroId() const
PointerType * getSwitchResumePointerType() const
CoroIdInst * getSwitchCoroId() const
SmallVector< CoroSizeInst *, 2 > CoroSizes
CallingConv::ID getResumeFunctionCC() const
SmallVector< AnyCoroSuspendInst *, 4 > CoroSuspends
Value * emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const
Allocate memory according to the rules of the active lowering.
ConstantInt * getIndex(uint64_t Value) const
SwitchLoweringStorage SwitchLowering
CoroBeginInst * CoroBegin
BasicBlock::iterator getInsertPtAfterFramePtr() const
ArrayRef< Type * > getRetconResultTypes() const
void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const
Deallocate memory according to the rules of the active lowering.
RetconLoweringStorage RetconLowering
SmallVector< CoroAlignInst *, 2 > CoroAligns
CoroIdAsyncInst * getAsyncCoroId() const
SmallVector< AnyCoroEndInst *, 4 > CoroEnds
SmallVector< CallInst *, 2 > SwiftErrorOps
BasicBlock * AllocaSpillBlock
unsigned getSwitchIndexField() const