LLVM 20.0.0git
Analysis.cpp
Go to the documentation of this file.
//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // element and recurse inside the element with the end of the indices
      // list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
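
// Illustrative sketch, not part of the upstream file: a hypothetical helper
// showing how ComputeLinearIndex numbers the scalars of a nested aggregate.
// For { i32, { float, i64 }, i8 } the scalars are numbered depth-first
// (i32 -> 0, float -> 1, i64 -> 2, i8 -> 3), so the index path {1, 1}
// linearizes to 2. Only APIs already included by this file are used.
[[maybe_unused]] static unsigned exampleLinearIndex(LLVMContext &Ctx) {
  Type *Inner = StructType::get(Type::getFloatTy(Ctx), Type::getInt64Ty(Ctx));
  Type *Outer =
      StructType::get(Type::getInt32Ty(Ctx), Inner, Type::getInt8Ty(Ctx));
  unsigned Path[] = {1, 1};
  return ComputeLinearIndex(Outer, Path, Path + 2); // == 2
}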

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  assert((Ty->isScalableTy() == StartingOffset.isScalable() ||
          StartingOffset.isZero()) &&
         "Offset/TypeSize mismatch!");
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      TypeSize EltOffset =
          SL ? SL->getElementOffset(EI - EB) : TypeSize::getZero();
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::getFixed(StartingOffset);
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);
  }
}
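
// Illustrative sketch, not part of the upstream file: a hypothetical helper
// showing what the overloads above produce. For { i32, [2 x float] } the
// aggregate is flattened into one EVT per scalar leaf; with a typical 64-bit
// DataLayout the byte offsets would be 0, 4 and 8 (the exact EVTs depend on
// the target's TargetLowering).
[[maybe_unused]] static void exampleComputeValueVTs(const TargetLowering &TLI,
                                                    const DataLayout &DL,
                                                    LLVMContext &Ctx) {
  Type *Agg = StructType::get(Type::getInt32Ty(Ctx),
                              ArrayType::get(Type::getFloatTy(Ctx), 2));
  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> FixedOffsets;
  ComputeValueVTs(TLI, DL, Agg, ValueVTs, /*MemVTs=*/nullptr, &FixedOffsets,
                  /*StartingOffset=*/0);
  // ValueVTs now holds three entries (one per scalar leaf) and FixedOffsets
  // their corresponding offsets within the aggregate.
}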

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}
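
// Illustrative sketch, not part of the upstream file: how the two helpers
// above compose. An ordered "less than" float compare maps to ISD::SETOLT,
// and dropping the NaN-ordering distinction collapses it to plain ISD::SETLT.
[[maybe_unused]] static bool exampleFCmpLowering() {
  ISD::CondCode Ordered = getFCmpCondCode(FCmpInst::FCMP_OLT); // ISD::SETOLT
  return getFCmpCodeWithoutNaN(Ordered) == ISD::SETLT;         // true
}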

ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
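
// Worked example for getNoopInput (illustrative, not part of the upstream
// file). Given IR along these lines:
//
//   %agg = insertvalue { i32, i64 } poison, i64 %x, 1
//   %val = extractvalue { i32, i64 } %agg, 1
//
// tracing %val pushes the extractvalue index onto ValLoc, then the matching
// insertvalue pops it again and the walk continues at %x: the extract/insert
// pair is free, so %x is reported as the underlying source of %val.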

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by
  // the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search will be blocked immediately and
  // the loop will be a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return false.
/// SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty aggregate
/// or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}
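
// Illustrative walk-through, not part of the upstream file: for the type
// {[0 x i64], {{}, i32, {}}, i32} used in the firstRealType comment above,
// firstRealType followed by repeated nextRealType calls visits the scalar
// leaves in depth-first order, skipping empty aggregates that contribute no
// values: first the i32 at path [1, 1], then the i32 at path [2], after which
// nextRealType returns false.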


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM,
                                bool ReturnsFirstArg) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl ||
          II->getIntrinsicID() == Intrinsic::fake_use)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering(),
      ReturnsFirstArg);
}
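
// Illustrative IR (not part of the upstream file) for isInTailCallPosition:
//
//   define i32 @good(i32 %x) {
//     %r = tail call i32 @callee(i32 %x)
//     ret i32 %r                          ; nothing between call and ret
//   }
//
//   define i32 @bad(i32 %x, ptr %p) {
//     %r = tail call i32 @callee(i32 %x)
//     store i32 0, ptr %p                 ; interposing side effect
//     ret i32 %r
//   }
//
// The call in @good is in tail position; the store in @bad has side effects,
// so the scan above rejects the tail call.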

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // Following attributes are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef,
                           Attribute::Range, Attribute::NoFPClass}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI,
                                           bool ReturnsFirstArg) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  // If the return value is the first argument of the call, the check
  // trivially passes.
  if (ReturnsFirstArg)
    return true;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned; it doesn't matter what the callee put there,
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

bool llvm::funcReturnsFirstArgOfCall(const CallInst &CI) {
  const ReturnInst *Ret =
      dyn_cast<ReturnInst>(CI.getParent()->getTerminator());
  Value *RetVal = Ret ? Ret->getReturnValue() : nullptr;
  bool ReturnsFirstArg = false;
  if (RetVal && ((RetVal == CI.getArgOperand(0))))
    ReturnsFirstArg = true;
  return ReturnsFirstArg;
}
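
// Illustrative IR (not part of the upstream file) for funcReturnsFirstArgOfCall:
//
//   define ptr @wrapper(ptr %dst, ptr %src) {
//     %r = tail call ptr @my_memcpy(ptr %dst, ptr %src)
//     ret ptr %dst                        ; returns the call's first argument
//   }
//
// Here the block's return value is the call's first argument, so the helper
// returns true and callers may pass ReturnsFirstArg=true even though %r
// itself is not what is returned. (@my_memcpy is a hypothetical callee.)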

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}
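
// Illustrative usage sketch, not part of the upstream file: a hypothetical
// pass querying which EH scope a block belongs to. An empty map means the
// function has no EH scopes and every block is implicitly in the parent
// function.
[[maybe_unused]] static void exampleQueryEHScopes(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> Scopes = getEHScopeMembership(MF);
  for (const MachineBasicBlock &MBB : MF)
    if (auto It = Scopes.find(&MBB); It != Scopes.end())
      (void)It->second; // the scope number this block was assigned to
}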