//===- Instructions.cpp - Implement the LLVM instructions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//

static cl::opt<bool> DisableI2pP2iOpt(
    "disable-i2p-p2i-opt", cl::init(false),
    cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
//===----------------------------------------------------------------------===//
//                            AllocaInst Class
//===----------------------------------------------------------------------===//

std::optional<TypeSize>
AllocaInst::getAllocationSize(const DataLayout &DL) const {

  assert(!Size.isScalable() && "Array elements cannot have a scalable size");

std::optional<TypeSize>
AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
//===----------------------------------------------------------------------===//
//                            SelectInst Class
//===----------------------------------------------------------------------===//

/// areInvalidOperands - Return a string if the specified operands are invalid
/// for a select operation, otherwise return null.
    return "both values to select must have same type";

    return "select values cannot have token type";

    return "vector select condition element type must be i1";

    return "selected values for vector select must be vectors";

    return "vector select requires selected vectors to have "
           "the same vector length as select condition";

  return "select condition must be i1 or <n x i1>";
//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

PHINode::PHINode(const PHINode &PN)
    : ReservedSpace(PN.getNumOperands()) {

// removeIncomingValue - Remove an incoming value.  This is useful if a
// predecessor basic block is deleted.

  // Move everything after this operand down.
  // FIXME: we could just swap with the end of the list, then erase.  However,
  // clients might not expect this to happen.  The code as it is thrashes the
  // use/def lists, which is kinda lame.

  // Nuke the last value.
  Op<-1>().set(nullptr);

  // If the PHI node is dead, because it has zero entries, nuke it now.
    // If anyone is using this PHI, make them use a dummy value instead...

                                 bool DeletePHIIfEmpty) {
  if (RemoveIndices.empty())

    return RemoveIndices.contains(U.getOperandNo());

  // Remove incoming blocks.

  // If the PHI node is dead, because it has zero entries, nuke it now.
    // If anyone is using this PHI, make them use a dummy value instead...

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation.  This grows the number of ops by 1.5
/// times.
void PHINode::growOperands() {
  unsigned NumOps = e + e / 2;
  if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
  ReservedSpace = NumOps;

/// hasConstantValue - If the specified PHI node always merges together the
/// same value, return the value, otherwise return null.

  // Exploit the fact that phi nodes always have at least one entry.
  if (ConstantValue != this)
    return nullptr; // Incoming values not all the same.
  // The case where the first value is this PHI.
  if (ConstantValue == this)

/// hasConstantOrUndefValue - Whether the specified PHI node always merges
/// together the same value, assuming that undefs result in the same value as
/// non-undefs.
/// Unlike \ref hasConstantValue, this does not return a value because the
/// unique non-undef incoming value need not dominate the PHI node.

  Value *ConstantValue = nullptr;

    if (ConstantValue && ConstantValue != Incoming)
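// Editor's illustration (not from the original source): a PHI node that
// hasConstantValue would fold, since every incoming value is the same:
//   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]   ; always merges %v
// whereas "phi i32 [ %v, %bb1 ], [ %w, %bb2 ]" merges distinct values and
// the function returns null.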
//===----------------------------------------------------------------------===//
//                       LandingPadInst Implementation
//===----------------------------------------------------------------------===//

LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
  init(NumReservedValues, NameStr);

    : ReservedSpace(LP.getNumOperands()) {
  for (unsigned I = 0, E = ReservedSpace; I != E; ++I)

void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
  ReservedSpace = NumReservedValues;

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void LandingPadInst::growOperands(unsigned Size) {
  if (ReservedSpace >= e + Size)
    return;
  ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;

  assert(OpNo < ReservedSpace && "Growing didn't work!");
//===----------------------------------------------------------------------===//
//                        CallBase Implementation
//===----------------------------------------------------------------------===//

  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:

    if (ChildOB.getTagName() != OpB.getTag())

    return cast<CallBrInst>(this)->getNumIndirectDests() + 1;

  if (isa<Function>(V) || isa<Constant>(V))

/// Tests if this call site must be tail call optimized.  Only a CallInst can
/// be tail call optimized.
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isMustTailCall();

/// Tests if this call site is marked as a tail call.
  if (auto *CI = dyn_cast<CallInst>(this))
    return CI->isTailCall();

    return F->getIntrinsicID();

    Mask |= F->getAttributes().getRetNoFPClass();

      Mask |= F->getAttributes().getParamNoFPClass(i);

  if (F->getAttributes().hasAttrSomewhere(Kind, &Index))

/// Determine whether the argument or parameter has the given attribute.
  if (!F->getAttributes().hasParamAttr(ArgNo, Kind))

  // Take into account mod/ref by operand bundles.
  case Attribute::ReadNone:
  case Attribute::ReadOnly:
  case Attribute::WriteOnly:

  return F->getAttributes().hasFnAttr(Kind);

bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
  return F->getAttributes().hasFnAttr(Kind);
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");

  return F->getAttributes().getFnAttr(Kind);

template <typename AK>
Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
  if (auto *F = dyn_cast<Function>(V))
    return F->getAttributes().getParamAttr(ArgNo, Kind);

CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,

                                 const unsigned BeginIndex) {
  for (auto &B : Bundles)
    It = std::copy(B.input_begin(), B.input_end(), It);

  auto BI = Bundles.begin();
  unsigned CurrentIndex = BeginIndex;

    assert(BI != Bundles.end() && "Incorrect allocation?");

    BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
    BOI.Begin = CurrentIndex;
    BOI.End = CurrentIndex + BI->input_size();
    CurrentIndex = BOI.End;

  assert(BI == Bundles.end() && "Incorrect allocation?");

  /// When there aren't many bundles, we do a simple linear search.
  /// Otherwise fall back to a binary search that uses the fact that bundles
  /// usually have a similar number of arguments to get faster convergence.
    if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
  assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
         "The Idx isn't in the operand bundle");

  /// We need a decimal number below, and to avoid using floating point
  /// numbers we use an integral value multiplied by this constant.
  constexpr unsigned NumberScaling = 1024;

    unsigned ScaledOperandPerBundle =
        NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
    Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
                       ScaledOperandPerBundle);
      Current = std::prev(End);
    assert(Current < End && Current >= Begin &&
           "the operand bundle doesn't cover every value in the range");
    if (OpIdx >= Current->Begin && OpIdx < Current->End)
    if (OpIdx >= Current->End)
         "the operand bundle doesn't cover every value in the range");
  return Create(CB, Bundles, InsertPt);

  bool CreateNew = false;

    if (Bundle.getTagID() == ID) {

  return CreateNew ? Create(CB, Bundles, InsertPt) : CB;

// Implementation note: this is a conservative implementation of operand
// bundle semantics, where *any* non-assume operand bundle (other than
// ptrauth) forces a callsite to be at least readonly.

// TODO: Add a method to get memory effects for operand bundles instead.

/// Determine if the function does not access memory.

/// Determine if the function does not access or only reads memory.

/// Determine if the function does not access or only writes memory.

/// Determine if the call can access memory only using pointers based
/// on its arguments.

/// Determine if the function may only access memory that is
/// inaccessible from the IR.

/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.

//===----------------------------------------------------------------------===//
//                        CallInst Implementation
//===----------------------------------------------------------------------===//

         "NumOperands not set up?");
691"Calling a function with bad signature!");
693for (
unsigned i = 0; i != Args.size(); ++i)
696"Calling a function with a bad signature!");
699// Set operands in order of their index to match use-list-order 731"Wrong number of operands allocated");
746 Args, OpB, CI->
getName(), InsertPt);
755// Update profile weight for call instruction by scaling it using the ratio 756// of S/T. The meaning of "branch_weights" meta data for call instruction is 757// transfered to represent call count. 760LLVM_DEBUG(
dbgs() <<
"Attempting to update profile weights will result in " 761"div by 0. Ignoring. Likely the function " 763 <<
" has 0 entry count, and contains call instructions " 764"with non-zero prof info.");
//===----------------------------------------------------------------------===//
//                        InvokeInst Implementation
//===----------------------------------------------------------------------===//

                      const Twine &NameStr) {
         "NumOperands not set up?");
         "Invoking a function with bad signature");
  for (unsigned i = 0, e = Args.size(); i != e; i++)
           "Invoking a function with a bad signature!");

  // Set operands in order of their index to match use-list-order

         "Wrong number of operands allocated");
  std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),

  std::vector<Value *> Args(II->arg_begin(), II->arg_end());

      II->getFunctionType(), II->getCalledOperand(), II->getNormalDest(),
      II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
  NewII->setCallingConv(II->getCallingConv());
  NewII->SubclassOptionalData = II->SubclassOptionalData;
  NewII->setAttributes(II->getAttributes());
  NewII->setDebugLoc(II->getDebugLoc());

  return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHIIt());

    LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
                         "div by 0. Ignoring. Likely the function "
                      << getParent()->getParent()->getName()
                      << " has 0 entry count, and contains call instructions "
                         "with non-zero prof info.");
//===----------------------------------------------------------------------===//
//                        CallBrInst Implementation
//===----------------------------------------------------------------------===//

                      const Twine &NameStr) {
                IndirectDests.size(),
         "NumOperands not set up?");
         "Calling a function with bad signature");
  for (unsigned i = 0, e = Args.size(); i != e; i++)
           "Calling a function with a bad signature!");

  // Set operands in order of their index to match use-list-order
  std::copy(Args.begin(), Args.end(), op_begin());
  NumIndirectDests = IndirectDests.size();
  for (unsigned i = 0; i != NumIndirectDests; ++i)

         "Wrong number of operands allocated");
  NumIndirectDests = CBI.NumIndirectDests;

  NewCBI->NumIndirectDests = CBI->NumIndirectDests;
//===----------------------------------------------------------------------===//
//                        ReturnInst Implementation
//===----------------------------------------------------------------------===//

         "Wrong number of operands allocated");

//===----------------------------------------------------------------------===//
//                        ResumeInst Implementation
//===----------------------------------------------------------------------===//

                  AllocMarker, InsertBefore) {

//===----------------------------------------------------------------------===//
//                     CleanupReturnInst Implementation
//===----------------------------------------------------------------------===//

         "Wrong number of operands allocated");
  setSubclassData<Instruction::OpaqueField>(

    setSubclassData<UnwindDestField>(true);

CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
  init(CleanupPad, UnwindBB);
//===----------------------------------------------------------------------===//
//                      CatchReturnInst Implementation
//===----------------------------------------------------------------------===//

                  AllocMarker, InsertBefore) {

//===----------------------------------------------------------------------===//
//                      CatchSwitchInst Implementation
//===----------------------------------------------------------------------===//

CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);

  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)

                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);
  ReservedSpace = NumReservedValues;

    setSubclassData<UnwindDestField>(true);

/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation.  This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  if (ReservedSpace >= NumOperands + Size)
  ReservedSpace = (NumOperands + Size / 2) * 2;

  assert(OpNo < ReservedSpace && "Growing didn't work!");

  // Move all subsequent handlers up one.
  for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
    *CurDst = *(CurDst + 1);
  // Null out the last handler use.

//===----------------------------------------------------------------------===//
//                       FuncletPadInst Implementation
//===----------------------------------------------------------------------===//

                    const Twine &NameStr) {
         "Wrong number of operands allocated");
  init(ParentPad, Args, NameStr);

//===----------------------------------------------------------------------===//
//                      UnreachableInst Implementation
//===----------------------------------------------------------------------===//

                  AllocMarker, InsertBefore) {}
//===----------------------------------------------------------------------===//
//                        BranchInst Implementation
//===----------------------------------------------------------------------===//

void BranchInst::AssertOK() {
           "May only branch on boolean predicates!");

  assert(IfTrue && "Branch destination may not be null!");

  // Assign in order of operand index to make use-list order predictable.

         "Wrong number of operands allocated");
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = BI.Op<-3>();
  Op<-2>() = BI.Op<-2>();
  Op<-1>() = BI.Op<-1>();

         "Cannot swap successors of an unconditional branch");
  // Update profile metadata if present and it matches our structural
  // expectations.

//===----------------------------------------------------------------------===//
//                        AllocaInst Implementation
//===----------------------------------------------------------------------===//

  assert(!isa<BasicBlock>(Amt) &&
         "Passed basic block into allocation size parameter! Use other ctor");
         "Allocation array size is not an integer!");

         "Insertion position cannot be null when alignment not provided!");
         "BB must be in a Function when alignment not provided!");
  return DL.getPrefTypeAlign(Ty);

    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

                 getAISize(Ty->getContext(), ArraySize), InsertBefore),
/// isStaticAlloca - Return true if this alloca is in the entry block of the
/// function and is a constant size.  If so, the code generator will fold it
/// into the prolog/epilog code, so it is basically free.
  // Must be constant size.
  // Must be in the entry block.

//===----------------------------------------------------------------------===//
//                         LoadInst Implementation
//===----------------------------------------------------------------------===//

void LoadInst::AssertOK() {
         "Ptr must have pointer type.");
         "Insertion position cannot be null when alignment not provided!");
         "BB must be in a Function when alignment not provided!");
  return DL.getABITypeAlign(Ty);
               SyncScope::System, InsertBef) {}

//===----------------------------------------------------------------------===//
//                        StoreInst Implementation
//===----------------------------------------------------------------------===//

void StoreInst::AssertOK() {
         "Ptr must have pointer type!");
1294"Ptr must have pointer type!");
1309 SyncScope::System, InsertBefore) {}
1324//===----------------------------------------------------------------------===// 1325// AtomicCmpXchgInst Implementation 1326//===----------------------------------------------------------------------===// 1341"All operands must be non-null!");
1343"Ptr must have pointer type!");
1345"Cmp type and NewVal type must be same!");
1356 AtomicCmpXchg, AllocMarker, InsertBefore) {
1357Init(
Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1360//===----------------------------------------------------------------------===// 1361// AtomicRMWInst Implementation 1362//===----------------------------------------------------------------------===// 1368"atomicrmw instructions can only be atomic.");
1370"atomicrmw instructions cannot be unordered.");
1380"Ptr must have pointer type!");
1382"AtomicRMW instructions must be atomic!");
1433return"<invalid operation>";
1439//===----------------------------------------------------------------------===// 1440// FenceInst Implementation 1441//===----------------------------------------------------------------------===// 1450//===----------------------------------------------------------------------===// 1451// GetElementPtrInst Implementation 1452//===----------------------------------------------------------------------===// 1457"NumOperands not initialized?");
1466 SourceElementType(GEPI.SourceElementType),
1467 ResultElementType(GEPI.ResultElementType) {
1469"Wrong number of operands allocated");
1475if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1480if (!
Idx->getType()->isIntOrIntVectorTy())
1482if (
auto *Array = dyn_cast<ArrayType>(Ty))
1483return Array->getElementType();
1484if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1485returnVector->getElementType();
1490if (
auto *
Struct = dyn_cast<StructType>(Ty)) {
1495if (
auto *Array = dyn_cast<ArrayType>(Ty))
1496return Array->getElementType();
1497if (
auto *
Vector = dyn_cast<VectorType>(Ty))
1498returnVector->getElementType();
1502template <
typename IndexTy>
1506for (IndexTy V : IdxList.
slice(1)) {
1527/// hasAllZeroIndices - Return true if all of the indices of this GEP are 1528/// zeros. If so, the result pointer and the first operand have the same 1529/// value, just potentially different types. 1533if (!CI->isZero())
returnfalse;
1541/// hasAllConstantIndices - Return true if all of the indices of this GEP are 1542/// constant integers. If so, the result pointer and the first operand have 1543/// a constant offset between them. 1566return cast<GEPOperator>(
this)->getNoWrapFlags();
1570return cast<GEPOperator>(
this)->isInBounds();
1574return cast<GEPOperator>(
this)->hasNoUnsignedSignedWrap();
1578return cast<GEPOperator>(
this)->hasNoUnsignedWrap();
1583// Delegate to the generic GEPOperator implementation. 1584return cast<GEPOperator>(
this)->accumulateConstantOffset(
DL,
Offset);
1590APInt &ConstantOffset)
const{
1591// Delegate to the generic GEPOperator implementation. 1592return cast<GEPOperator>(
this)->collectOffset(
DL,
BitWidth, VariableOffsets,
1596//===----------------------------------------------------------------------===// 1597// ExtractElementInst Implementation 1598//===----------------------------------------------------------------------===// 1600ExtractElementInst::ExtractElementInst(
Value *Val,
Value *Index,
1604 ExtractElement, AllocMarker, InsertBef) {
1605assert(isValidOperands(Val, Index) &&
1606"Invalid extractelement instruction operands!");
1618//===----------------------------------------------------------------------===// 1619// InsertElementInst Implementation 1620//===----------------------------------------------------------------------===// 1622InsertElementInst::InsertElementInst(
Value *Vec,
Value *Elt,
Value *Index,
1627"Invalid insertelement instruction operands!");
1637returnfalse;
// First operand of insertelement must be vector type. 1639if (Elt->
getType() != cast<VectorType>(Vec->
getType())->getElementType())
1640returnfalse;
// Second operand of insertelement must be vector element type. 1642if (!Index->getType()->isIntegerTy())
1643returnfalse;
// Third operand of insertelement must be i32. 1647//===----------------------------------------------------------------------===// 1648// ShuffleVectorInst Implementation 1649//===----------------------------------------------------------------------===// 1652assert(V &&
"Cannot create placeholder of nullptr V");
1673 ShuffleVector, AllocMarker, InsertBefore) {
1675"Invalid shuffle vector instruction operands!");
1691 ShuffleVector, AllocMarker, InsertBefore) {
1693"Invalid shuffle vector instruction operands!");
1701int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
1702int NumMaskElts = ShuffleMask.
size();
1704for (
int i = 0; i != NumMaskElts; ++i) {
1710assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts &&
"Out-of-range mask");
1711 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
1712 NewMask[i] = MaskElt;
1720// V1 and V2 must be vectors of the same type. 1721if (!isa<VectorType>(V1->
getType()) || V1->
getType() != V2->getType())
1724// Make sure the mask elements make sense. 1726 cast<VectorType>(V1->
getType())->getElementCount().getKnownMinValue();
1727for (
int Elem : Mask)
1731if (isa<ScalableVectorType>(V1->
getType()))
1740// V1 and V2 must be vectors of the same type. 1744// Mask must be vector of i32, and must be the same kind of vector as the 1746auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1747if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
1748 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->
getType()))
1751// Check to see if Mask is valid. 1752if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1755// NOTE: Through vector ConstantInt we have the potential to support more 1756// than just zero splat masks but that requires a LangRef change. 1757if (isa<ScalableVectorType>(MaskTy))
1760unsigned V1Size = cast<FixedVectorType>(V1->
getType())->getNumElements();
1762if (
constauto *CI = dyn_cast<ConstantInt>(Mask))
1763return !CI->uge(V1Size * 2);
1765if (
constauto *MV = dyn_cast<ConstantVector>(Mask)) {
1766for (
Value *
Op : MV->operands()) {
1767if (
auto *CI = dyn_cast<ConstantInt>(
Op)) {
1768if (CI->uge(V1Size*2))
1770 }
elseif (!isa<UndefValue>(
Op)) {
1777if (
constauto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1778for (
unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->
getNumElements();
1780if (CDS->getElementAsInteger(i) >= V1Size*2)
1790ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
1792if (isa<ConstantAggregateZero>(Mask)) {
1793 Result.resize(EC.getKnownMinValue(), 0);
1797 Result.reserve(EC.getKnownMinValue());
1799if (EC.isScalable()) {
1800assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
1801"Scalable vector shuffle mask must be undef or zeroinitializer");
1802int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
1803for (
unsignedI = 0;
I < EC.getKnownMinValue(); ++
I)
1804 Result.emplace_back(MaskVal);
1808unsigned NumElts = EC.getKnownMinValue();
1810if (
auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1811for (
unsigned i = 0; i != NumElts; ++i)
1812 Result.push_back(CDS->getElementAsInteger(i));
1815for (
unsigned i = 0; i != NumElts; ++i) {
1816Constant *
C = Mask->getAggregateElement(i);
1817 Result.push_back(isa<UndefValue>(
C) ? -1 :
1818 cast<ConstantInt>(
C)->getZExtValue());
1823 ShuffleMask.
assign(Mask.begin(), Mask.end());
1830if (isa<ScalableVectorType>(ResultTy)) {
1838for (
int Elem : Mask) {
1842 MaskConst.
push_back(ConstantInt::get(Int32Ty, Elem));
1848assert(!Mask.empty() &&
"Shuffle mask must contain elements");
1854assert(
I >= 0 &&
I < (NumOpElts * 2) &&
1855"Out-of-bounds shuffle mask element");
1856 UsesLHS |= (
I < NumOpElts);
1857 UsesRHS |= (
I >= NumOpElts);
1858if (UsesLHS && UsesRHS)
1861// Allow for degenerate case: completely undef mask means neither source is used. 1862return UsesLHS || UsesRHS;
1866// We don't have vector operand size information, so assume operands are the 1867// same size as the mask. 1874for (
int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1877if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1884if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1886// We don't have vector operand size information, so assume operands are the 1887// same size as the mask. 1892if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1897// The number of elements in the mask must be at least 2. 1901for (
intI = 0, E = Mask.size();
I < E; ++
I) {
1904if (Mask[
I] != (NumSrcElts - 1 -
I) &&
1905 Mask[
I] != (NumSrcElts + NumSrcElts - 1 -
I))
1912if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1916for (
intI = 0, E = Mask.size();
I < E; ++
I) {
1919if (Mask[
I] != 0 && Mask[
I] != NumSrcElts)
1926if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1928// Select is differentiated from identity. It requires using both sources. 1931for (
intI = 0, E = Mask.size();
I < E; ++
I) {
1934if (Mask[
I] !=
I && Mask[
I] != (NumSrcElts +
I))
1941// Example masks that will return true: 1944// trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g> 1945// trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h> 1947if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1949// 1. The number of elements in the mask must be a power-of-2 and at least 2. 1950int Sz = Mask.size();
1954// 2. The first element of the mask must be either a 0 or a 1. 1955if (Mask[0] != 0 && Mask[0] != 1)
1958// 3. The difference between the first 2 elements must be equal to the 1959// number of elements in the mask. 1960if ((Mask[1] - Mask[0]) != NumSrcElts)
1963// 4. The difference between consecutive even-numbered and odd-numbered 1964// elements must be equal to 2. 1965for (
intI = 2;
I < Sz; ++
I) {
1966int MaskEltVal = Mask[
I];
1967if (MaskEltVal == -1)
1969int MaskEltPrevVal = Mask[
I - 2];
1970if (MaskEltVal - MaskEltPrevVal != 2)
1978if (Mask.size() !=
static_cast<unsigned>(NumSrcElts))
1980// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> 1982for (
intI = 0, E = Mask.size();
I != E; ++
I) {
1983int MaskEltVal = Mask[
I];
1984if (MaskEltVal == -1)
1987if (StartIndex == -1) {
1988// Don't support a StartIndex that begins in the second input, or if the 1989// first non-undef index would access below the StartIndex. 1990if (MaskEltVal <
I || NumSrcElts <= (MaskEltVal -
I))
1993 StartIndex = MaskEltVal -
I;
1997// Splice is sequential starting from StartIndex. 1998if (MaskEltVal != (StartIndex +
I))
2002if (StartIndex == -1)
2005// NOTE: This accepts StartIndex == 0 (COPY). 2011int NumSrcElts,
int &Index) {
2012// Must extract from a single source. 2016// Must be smaller (else this is an Identity shuffle). 2017if (NumSrcElts <= (
int)Mask.size())
2020// Find start of extraction, accounting that we may start with an UNDEF. 2022for (
int i = 0, e = Mask.size(); i != e; ++i) {
2026intOffset = (M % NumSrcElts) - i;
2027if (0 <= SubIndex && SubIndex !=
Offset)
2032if (0 <= SubIndex && SubIndex + (
int)Mask.size() <= NumSrcElts) {
2040int NumSrcElts,
int &NumSubElts,
2042int NumMaskElts = Mask.size();
2044// Don't try to match if we're shuffling to a smaller size. 2045if (NumMaskElts < NumSrcElts)
2048// TODO: We don't recognize self-insertion/widening. 2052// Determine which mask elements are attributed to which source. 2056bool Src0Identity =
true;
2057bool Src1Identity =
true;
2059for (
int i = 0; i != NumMaskElts; ++i) {
2065if (M < NumSrcElts) {
2067 Src0Identity &= (M == i);
2071 Src1Identity &= (M == (i + NumSrcElts));
2073assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2074"unknown shuffle elements");
2076"2-source shuffle not found");
2078// Determine lo/hi span ranges. 2079// TODO: How should we handle undefs at the start of subvector insertions? 2085// If src0 is in place, see if the src1 elements is inplace within its own 2088int NumSub1Elts = Src1Hi - Src1Lo;
2091 NumSubElts = NumSub1Elts;
2097// If src1 is in place, see if the src0 elements is inplace within its own 2100int NumSub0Elts = Src0Hi - Src0Lo;
2103 NumSubElts = NumSub0Elts;
2113// FIXME: Not currently possible to express a shuffle mask for a scalable 2114// vector for this case. 2115if (isa<ScalableVectorType>(
getType()))
2118int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2119int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2120if (NumMaskElts <= NumOpElts)
2123// The first part of the mask must choose elements from exactly 1 source op. 2128// All extending must be with undef elements. 2129for (
int i = NumOpElts; i < NumMaskElts; ++i)
2137// FIXME: Not currently possible to express a shuffle mask for a scalable 2138// vector for this case. 2139if (isa<ScalableVectorType>(
getType()))
2142int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2143int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2144if (NumMaskElts >= NumOpElts)
2151// Vector concatenation is differentiated from identity with padding. 2152if (isa<UndefValue>(
Op<0>()) || isa<UndefValue>(
Op<1>()))
2155// FIXME: Not currently possible to express a shuffle mask for a scalable 2156// vector for this case. 2157if (isa<ScalableVectorType>(
getType()))
2160int NumOpElts = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2161int NumMaskElts = cast<FixedVectorType>(
getType())->getNumElements();
2162if (NumMaskElts != NumOpElts * 2)
2165// Use the mask length rather than the operands' vector lengths here. We 2166// already know that the shuffle returns a vector twice as long as the inputs, 2167// and neither of the inputs are undef vectors. If the mask picks consecutive 2168// elements from both inputs, then this is a concatenation of the inputs. 2173int ReplicationFactor,
int VF) {
2174assert(Mask.size() == (
unsigned)ReplicationFactor * VF &&
2175"Unexpected mask size.");
2177for (
int CurrElt :
seq(VF)) {
2178ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2179assert(CurrSubMask.
size() == (
unsigned)ReplicationFactor &&
2181 Mask = Mask.drop_front(ReplicationFactor);
2182if (!
all_of(CurrSubMask, [CurrElt](
int MaskElt) {
2187assert(Mask.empty() &&
"Did not consume the whole mask?");
2193int &ReplicationFactor,
int &VF) {
2194// undef-less case is trivial. 2197 Mask.take_while([](
int MaskElt) {
return MaskElt == 0; }).
size();
2198if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2200 VF = Mask.size() / ReplicationFactor;
2204// However, if the mask contains undef's, we have to enumerate possible tuples 2205// and pick one. There are bounds on replication factor: [1, mask size] 2206// (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle) 2207// Additionally, mask size is a replication factor multiplied by vector size, 2208// which further significantly reduces the search space. 2210// Before doing that, let's perform basic correctness checking first. 2212for (
int MaskElt : Mask) {
2215// Elements must be in non-decreasing order. 2216if (MaskElt < Largest)
2218 Largest = std::max(Largest, MaskElt);
2221// Prefer larger replication factor if all else equal. 2222for (
int PossibleReplicationFactor :
2223reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2224if (Mask.size() % PossibleReplicationFactor != 0)
2226int PossibleVF = Mask.size() / PossibleReplicationFactor;
2230 ReplicationFactor = PossibleReplicationFactor;
2240// Not possible to express a shuffle mask for a scalable vector for this 2242if (isa<ScalableVectorType>(
getType()))
2245 VF = cast<FixedVectorType>(
Op<0>()->
getType())->getNumElements();
2246if (ShuffleMask.
size() % VF != 0)
2248 ReplicationFactor = ShuffleMask.
size() / VF;
2254if (VF <= 0 || Mask.size() <
static_cast<unsigned>(VF) ||
2255 Mask.size() % VF != 0)
2257for (
unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2262for (
intIdx : SubMask) {
2272/// Return true if this shuffle mask is a replication mask. 2274// Not possible to express a shuffle mask for a scalable vector for this 2276if (isa<ScalableVectorType>(
getType()))
2286// shuffle_vector can only interleave fixed length vectors - for scalable 2287// vectors, see the @llvm.vector.interleave2 intrinsic 2298unsigned NumElts = Mask.size();
2299if (NumElts % Factor)
2302unsigned LaneLen = NumElts / Factor;
2306 StartIndexes.
resize(Factor);
2308// Check whether each element matches the general interleaved rule. 2309// Ignore undef elements, as long as the defined elements match the rule. 2310// Outer loop processes all factors (x, y, z in the above example) 2312for (;
I < Factor;
I++) {
2313unsigned SavedLaneValue;
2314unsigned SavedNoUndefs = 0;
2316// Inner loop processes consecutive accesses (x, x+1... in the example) 2317for (J = 0; J < LaneLen - 1; J++) {
2318// Lane computes x's position in the Mask 2319unsigned Lane = J * Factor +
I;
2320unsigned NextLane = Lane + Factor;
2321int LaneValue = Mask[Lane];
2322int NextLaneValue = Mask[NextLane];
2324// If both are defined, values must be sequential 2325if (LaneValue >= 0 && NextLaneValue >= 0 &&
2326 LaneValue + 1 != NextLaneValue)
2329// If the next value is undef, save the current one as reference 2330if (LaneValue >= 0 && NextLaneValue < 0) {
2331 SavedLaneValue = LaneValue;
2335// Undefs are allowed, but defined elements must still be consecutive: 2336// i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, .... 2337// Verify this by storing the last non-undef followed by an undef 2338// Check that following non-undef masks are incremented with the 2339// corresponding distance. 2340if (SavedNoUndefs > 0 && LaneValue < 0) {
2342if (NextLaneValue >= 0 &&
2343 SavedLaneValue + SavedNoUndefs != (
unsigned)NextLaneValue)
2353// Check that the start of the I range (J=0) is greater than 0 2354 StartMask = Mask[
I];
2355 }
elseif (Mask[(LaneLen - 1) * Factor +
I] >= 0) {
2356// StartMask defined by the last value in lane 2357 StartMask = Mask[(LaneLen - 1) * Factor +
I] - J;
2358 }
elseif (SavedNoUndefs > 0) {
2359// StartMask defined by some non-zero value in the j loop 2360 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2362// else StartMask remains set to 0, i.e. all elements are undefs 2366// We must stay within the vectors; This case can happen with undefs. 2367if (StartMask + LaneLen > NumInputElts)
2370 StartIndexes[
I] = StartMask;
2376/// Check if the mask is a DE-interleave mask of the given factor 2378/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor> 2382// Check all potential start indices from 0 to (Factor - 1). 2383for (
unsignedIdx = 0;
Idx < Factor;
Idx++) {
2386// Check that elements are in ascending order by Factor. Ignore undef 2388for (;
I < Mask.size();
I++)
2389if (Mask[
I] >= 0 &&
static_cast<unsigned>(Mask[
I]) !=
Idx +
I * Factor)
2392if (
I == Mask.size()) {
2401/// Try to lower a vector shuffle as a bit rotation. 2403/// Look for a repeated rotation pattern in each sub group. 2404/// Returns an element-wise left bit rotation amount or -1 if failed. 2406int NumElts = Mask.size();
2407assert((NumElts % NumSubElts) == 0 &&
"Illegal shuffle mask");
2410for (
int i = 0; i != NumElts; i += NumSubElts) {
2411for (
int j = 0; j != NumSubElts; ++j) {
2415if (M < i || M >= i + NumSubElts)
2417intOffset = (NumSubElts - (M - (i + j))) % NumSubElts;
2418if (0 <= RotateAmt &&
Offset != RotateAmt)
2427ArrayRef<int> Mask,
unsigned EltSizeInBits,
unsigned MinSubElts,
2428unsigned MaxSubElts,
unsigned &NumSubElts,
unsigned &RotateAmt) {
2429for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
2431if (EltRotateAmt < 0)
2433 RotateAmt = EltRotateAmt * EltSizeInBits;
2440//===----------------------------------------------------------------------===// 2441// InsertValueInst Class 2442//===----------------------------------------------------------------------===// 2448// There's no fundamental reason why we require at least one index 2449// (other than weirdness with &*IdxBegin being invalid; see 2450// getelementptr's init routine for example). But there's no 2451// present need to support it. 2452assert(!Idxs.
empty() &&
"InsertValueInst must have at least one index");
2455 Val->
getType() &&
"Inserted value must match indexed type!");
2465 Indices(IVI.Indices) {
2471//===----------------------------------------------------------------------===// 2472// ExtractValueInst Class 2473//===----------------------------------------------------------------------===// 2478// There's no fundamental reason why we require at least one index. 2479// But there's no present need to support it. 2480assert(!Idxs.
empty() &&
"ExtractValueInst must have at least one index");
2489 Indices(EVI.Indices) {
2493// getIndexedType - Returns the type of the element that would be extracted 2494// with an extractvalue instruction with the specified parameters. 2496// A null type is returned if the indices are invalid for the specified 2501for (
unsigned Index : Idxs) {
2502// We can't use CompositeType::indexValid(Index) here. 2503// indexValid() always returns true for arrays because getelementptr allows 2504// out-of-bounds indices. Since we don't allow those for extractvalue and 2505// insertvalue we need to check array indexing manually. 2506// Since the only other types we can index into are struct types it's just 2507// as easy to check those manually as well. 2508if (
ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2509if (Index >= AT->getNumElements())
2511 Agg = AT->getElementType();
2512 }
elseif (
StructType *ST = dyn_cast<StructType>(Agg)) {
2513if (Index >= ST->getNumElements())
2515 Agg = ST->getElementType(Index);
2517// Not a valid type to index into. 2521returnconst_cast<Type*
>(Agg);
2524//===----------------------------------------------------------------------===// 2525// UnaryOperator Class 2526//===----------------------------------------------------------------------===// 2541void UnaryOperator::AssertOK() {
2543 (void)LHS;
// Silence warnings. 2548"Unary operation should return same type as operand!");
2550"Tried to create a floating-point operation on a " 2551"non-floating-point type!");
2558//===----------------------------------------------------------------------===// 2559// BinaryOperator Class 2560//===----------------------------------------------------------------------===// 2564 :
Instruction(Ty, iType, AllocMarker, InsertBefore) {
2571void BinaryOperator::AssertOK() {
2573 (void)LHS; (void)RHS;
// Silence warnings. 2575"Binary operator operand types must match!");
2581"Arithmetic operation should return same type as operands!");
2583"Tried to create an integer operation on a non-integer type!");
2588"Arithmetic operation should return same type as operands!");
2590"Tried to create a floating-point operation on a " 2591"non-floating-point type!");
2596"Arithmetic operation should return same type as operands!");
2598"Incorrect operand type (not integer) for S/UDIV");
2602"Arithmetic operation should return same type as operands!");
2604"Incorrect operand type (not floating point) for FDIV");
2609"Arithmetic operation should return same type as operands!");
2611"Incorrect operand type (not integer) for S/UREM");
2615"Arithmetic operation should return same type as operands!");
2617"Incorrect operand type (not floating point) for FREM");
2623"Shift operation should return same type as operands!");
2625"Tried to create a shift operation on a non-integral type!");
2630"Logical operation should return same type as operands!");
2632"Tried to create a logical operation on a non-integral type!");
2643"Cannot create binary operator with two operands of differing type!");
2649Value *Zero = ConstantInt::get(
Op->getType(), 0);
2656Value *Zero = ConstantInt::get(
Op->getType(), 0);
2657return BinaryOperator::CreateNSWSub(Zero,
Op,
Name, InsertBefore);
2664Op->getType(),
Name, InsertBefore);
2667// Exchange the two operands to this instruction. This instruction is safe to 2668// use on any binary instruction and does not modify the semantics of the 2669// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode 2673returntrue;
// Can't commute operands 2678//===----------------------------------------------------------------------===// 2679// FPMathOperator Class 2680//===----------------------------------------------------------------------===// 2684 cast<Instruction>(
this)->getMetadata(LLVMContext::MD_fpmath);
2691//===----------------------------------------------------------------------===// 2693//===----------------------------------------------------------------------===// 2695// Just determine if this cast only deals with integral->integral conversion. 2699case Instruction::ZExt:
2700case Instruction::SExt:
2701case Instruction::Trunc:
2703case Instruction::BitCast:
2709/// This function determines if the CastInst does not require any bits to be 2710/// changed in order to effect the cast. Essentially, it identifies cases where 2711/// no code gen is necessary for the cast, hence the name no-op cast. For 2712/// example, the following are all no-op casts: 2713/// # bitcast i32* %x to i8* 2714/// # bitcast <2 x i32> %x to <4 x i16> 2715/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only 2716/// Determine if the described cast is a no-op. 2724case Instruction::Trunc:
2725case Instruction::ZExt:
2726case Instruction::SExt:
2727case Instruction::FPTrunc:
2728case Instruction::FPExt:
2729case Instruction::UIToFP:
2730case Instruction::SIToFP:
2731case Instruction::FPToUI:
2732case Instruction::FPToSI:
2733case Instruction::AddrSpaceCast:
2734// TODO: Target informations may give a more accurate answer here. 2736case Instruction::BitCast:
2737returntrue;
// BitCast never modifies bits. 2738case Instruction::PtrToInt:
2739returnDL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
2741case Instruction::IntToPtr:
2742returnDL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
2751/// This function determines if a pair of casts can be eliminated and what 2752/// opcode should be used in the elimination. This assumes that there are two 2753/// instructions like this: 2754/// * %F = firstOpcode SrcTy %x to MidTy 2755/// * %S = secondOpcode MidTy %F to DstTy 2756/// The function returns a resultOpcode so these two casts can be replaced with: 2757/// * %Replacement = resultOpcode %SrcTy %x to DstTy 2758/// If no such cast is permitted, the function returns 0. 2763// Define the 144 possibilities for these two cast instructions. The values 2764// in this matrix determine what to do in a given situation and select the 2765// case in the switch below. The rows correspond to firstOp, the columns 2766// correspond to secondOp. In looking at the table below, keep in mind 2767// the following cast properties: 2769// Size Compare Source Destination 2770// Operator Src ? Size Type Sign Type Sign 2771// -------- ------------ ------------------- --------------------- 2772// TRUNC > Integer Any Integral Any 2773// ZEXT < Integral Unsigned Integer Any 2774// SEXT < Integral Signed Integer Any 2775// FPTOUI n/a FloatPt n/a Integral Unsigned 2776// FPTOSI n/a FloatPt n/a Integral Signed 2777// UITOFP n/a Integral Unsigned FloatPt n/a 2778// SITOFP n/a Integral Signed FloatPt n/a 2779// FPTRUNC > FloatPt n/a FloatPt n/a 2780// FPEXT < FloatPt n/a FloatPt n/a 2781// PTRTOINT n/a Pointer n/a Integral Unsigned 2782// INTTOPTR n/a Integral Unsigned Pointer n/a 2783// BITCAST = FirstClass n/a FirstClass n/a 2784// ADDRSPCST n/a Pointer n/a Pointer n/a 2786// NOTE: some transforms are safe, but we consider them to be non-profitable. 2787// For example, we could merge "fptoui double to i32" + "zext i32 to i64", 2788// into "fptoui double to i64", but this loses information about the range 2789// of the produced value (we no longer know the top-part is all zeros). 2790// Further this conversion is often much more expensive for typical hardware, 2791// and causes issues when building libgcc. We disallow fptosi+sext for the 2793constunsigned numCastOps =
2794 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2795staticconstuint8_t CastResults[numCastOps][numCastOps] = {
2796// T F F U S F F P I B A -+ 2797// R Z S P P I I T P 2 N T S | 2798// U E E 2 2 2 2 R E I T C C +- secondOp 2799// N X X U S F F N X N 2 V V | 2800// C T T I I P P C T T P T T -+ 2801 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
// Trunc -+ 2802 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0},
// ZExt | 2803 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0},
// SExt | 2804 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
// FPToUI | 2805 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0},
// FPToSI | 2806 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
// UIToFP +- firstOp 2807 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
// SIToFP | 2808 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0},
// FPTrunc | 2809 { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0},
// FPExt | 2810 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0},
// PtrToInt | 2811 { 99,99,99,99,99,99,99,99,99,11,99,15, 0},
// IntToPtr | 2812 { 5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14},
// BitCast | 2813 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12},
// AddrSpaceCast -+ 2816// TODO: This logic could be encoded into the table above and handled in the 2818// If either of the casts are a bitcast from scalar to vector, disallow the 2819// merging. However, any pair of bitcasts are allowed. 2820bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2821bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2822bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2824// Check if any of the casts convert scalars <-> vectors. 2825if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2826 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2827if (!AreBothBitcasts)
2830int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2831 [secondOp-Instruction::CastOpsBegin];
2834// Categorically disallowed. 2837// Allowed, use first cast's opcode. 2840// Allowed, use second cast's opcode. 2843// No-op cast in second op implies firstOp as long as the DestTy 2844// is integer and we are not converting between a vector and a 2850// No-op cast in second op implies firstOp as long as the DestTy 2856// No-op cast in first op implies secondOp as long as the SrcTy 2862// Disable inttoptr/ptrtoint optimization if enabled. 2866// Cannot simplify if address spaces are different! 2871// We can still fold this without knowing the actual sizes as long we 2872// know that the intermediate pointer is the largest possible 2874// FIXME: Is this always true? 2876return Instruction::BitCast;
2878// ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size. 2879if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2882if (MidSize >= PtrSize)
2883return Instruction::BitCast;
2887// ext, trunc -> bitcast, if the SrcTy and DstTy are the same 2888// ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy) 2889// ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy) 2893return Instruction::BitCast;
2894if (SrcSize < DstSize)
2896if (SrcSize > DstSize)
2901// zext, sext -> zext, because sext can't sign extend after zext 2902return Instruction::ZExt;
2904// inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize 2910if (SrcSize <= PtrSize && SrcSize == DstSize)
2911return Instruction::BitCast;
2915// addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS 2916// addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS 2918return Instruction::AddrSpaceCast;
2919return Instruction::BitCast;
2921// FIXME: this state can be merged with (1), but the following assert 2922// is useful to check the correcteness of the sequence due to semantic 2923// change of bitcast. 2930"Illegal addrspacecast, bitcast sequence!");
2931// Allowed, use first cast's opcode 2934// bitcast, addrspacecast -> addrspacecast 2935return Instruction::AddrSpaceCast;
2937// FIXME: this state can be merged with (1), but the following assert 2938// is useful to check the correcteness of the sequence due to semantic 2939// change of bitcast. 2945"Illegal inttoptr, bitcast sequence!");
2946// Allowed, use first cast's opcode 2949// FIXME: this state can be merged with (2), but the following assert 2950// is useful to check the correcteness of the sequence due to semantic 2951// change of bitcast. 2957"Illegal bitcast, ptrtoint sequence!");
2958// Allowed, use second cast's opcode 2961// (sitofp (zext x)) -> (uitofp x) 2962return Instruction::UIToFP;
2964// Cast combination can't happen (error in input). This is for all cases 2965// where the MidTy is not the same for the two cast instructions. 2975// Construct and return the appropriate CastInst subclass 3000returnCreate(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3001returnCreate(Instruction::ZExt, S, Ty,
Name, InsertBefore);
3007returnCreate(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3008returnCreate(Instruction::SExt, S, Ty,
Name, InsertBefore);
3014returnCreate(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3015returnCreate(Instruction::Trunc, S, Ty,
Name, InsertBefore);
3018/// Create a BitCast or a PtrToInt cast instruction 3026 cast<VectorType>(Ty)->getElementCount() ==
3027 cast<VectorType>(S->
getType())->getElementCount()) &&
3031returnCreate(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3042returnCreate(Instruction::AddrSpaceCast, S, Ty,
Name, InsertBefore);
3044returnCreate(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3051returnCreate(Instruction::PtrToInt, S, Ty,
Name, InsertBefore);
3053returnCreate(Instruction::IntToPtr, S, Ty,
Name, InsertBefore);
3055returnCreate(Instruction::BitCast, S, Ty,
Name, InsertBefore);
3062"Invalid integer cast");
3063unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3066 (SrcBits == DstBits ? Instruction::BitCast :
3067 (SrcBits > DstBits ? Instruction::Trunc :
3068 (
isSigned ? Instruction::SExt : Instruction::ZExt)));
3076unsigned SrcBits =
C->getType()->getScalarSizeInBits();
3078assert((
C->getType() == Ty || SrcBits != DstBits) &&
"Invalid cast");
3080 (SrcBits == DstBits ? Instruction::BitCast :
3081 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
3092if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3093if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
3094if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3095// An element by element cast. Valid if casting the elements is valid. 3096 SrcTy = SrcVecTy->getElementType();
3097 DestTy = DestVecTy->getElementType();
3102if (
PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
3103if (
PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
3104return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
3111// Could still have vectors of pointers if the number of elements doesn't 3116if (SrcBits != DestBits)
3124// ptrtoint and inttoptr are not allowed on non-integral pointers 3125if (
auto *PtrTy = dyn_cast<PointerType>(SrcTy))
3126if (
auto *IntTy = dyn_cast<IntegerType>(DestTy))
3127return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3128 !
DL.isNonIntegralPointerType(PtrTy));
3129if (
auto *PtrTy = dyn_cast<PointerType>(DestTy))
3130if (
auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3131return (IntTy->getBitWidth() ==
DL.getPointerTypeSizeInBits(PtrTy) &&
3132 !
DL.isNonIntegralPointerType(PtrTy));
3137// Provide a way to get a "cast" where the cast opcode is inferred from the 3138// types and size of the operand. This, basically, is a parallel of the 3139// logic in the castIsValid function below. This axiom should hold: 3140// castIsValid( getCastOpcode(Val, Ty), Val, Ty) 3141// should not assert in castIsValid. In other words, this produces a "correct" 3142// casting opcode for the arguments passed to it. 3145constValue *Src,
bool SrcIsSigned,
Type *DestTy,
bool DestIsSigned) {
3146Type *SrcTy = Src->getType();
3149"Only first class types are castable!");
3154// FIXME: Check address space sizes here 3155if (
VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3156if (
VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3157if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
3158// An element by element cast. Find the appropriate opcode based on the 3160 SrcTy = SrcVecTy->getElementType();
3161 DestTy = DestVecTy->getElementType();
3164// Get the bit sizes, we'll need these 3168// Run through the possibilities ... 3170if (SrcTy->
isIntegerTy()) {
// Casting from integral 3171if (DestBits < SrcBits)
3172return Trunc;
// int -> smaller int 3173elseif (DestBits > SrcBits) {
// its an extension 3175return SExt;
// signed -> SEXT 3177return ZExt;
// unsigned -> ZEXT 3179return BitCast;
// Same size, No-op cast 3183return FPToSI;
// FP -> sint 3185return FPToUI;
// FP -> uint 3187assert(DestBits == SrcBits &&
3188"Casting vector to integer of different width");
3189return BitCast;
// Same size, no-op cast 3192"Casting from a value that is not first-class type");
3193return PtrToInt;
// ptr -> int 3196if (SrcTy->
isIntegerTy()) {
// Casting from integral 3198return SIToFP;
// sint -> FP 3200return UIToFP;
// uint -> FP 3202if (DestBits < SrcBits) {
3203return FPTrunc;
// FP -> smaller FP 3204 }
elseif (DestBits > SrcBits) {
3205return FPExt;
// FP -> larger FP 3207return BitCast;
// same size, no-op cast 3210assert(DestBits == SrcBits &&
3211"Casting vector to floating point of different width");
3212return BitCast;
// same size, no-op cast 3216assert(DestBits == SrcBits &&
3217"Illegal cast to vector (wrong type or size)");
3222return AddrSpaceCast;
3223return BitCast;
// ptr -> ptr 3225return IntToPtr;
// int -> ptr 3232//===----------------------------------------------------------------------===// 3233// CastInst SubClass Constructors 3234//===----------------------------------------------------------------------===// 3236/// Check that the construction parameters for a CastInst are correct. This 3237/// could be broken out into the separate constructors but it is useful to have 3238/// it in one place and to eliminate the redundant code for getting the sizes 3239/// of the types involved. 3246// Get the size of the types in bits, and whether we are dealing 3247// with vector types, we'll need this later. 3248bool SrcIsVec = isa<VectorType>(SrcTy);
3249bool DstIsVec = isa<VectorType>(DstTy);
3253// If these are vector types, get the lengths of the vectors (using zero for 3254// scalar types means that checking that vector lengths match also checks that 3255// scalars are not being converted to vectors or vectors to scalars). 3256ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
3258ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
3261// Switch on the opcode provided 3263default:
returnfalse;
// This is an input error 3264case Instruction::Trunc:
3266 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3267case Instruction::ZExt:
3269 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3270case Instruction::SExt:
3272 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3273case Instruction::FPTrunc:
3275 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
3276case Instruction::FPExt:
3278 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
3279case Instruction::UIToFP:
3280case Instruction::SIToFP:
3283case Instruction::FPToUI:
3284case Instruction::FPToSI:
3287case Instruction::PtrToInt:
3291case Instruction::IntToPtr:
3295case Instruction::BitCast: {
3299// BitCast implies a no-op cast of type only. No bits change. 3300// However, you can't cast pointers to anything but pointers. 3301if (!SrcPtrTy != !DstPtrTy)
3304// For non-pointer cases, the cast is okay if the source and destination bit 3305// widths are identical. 3309// If both are pointers then the address spaces must match. 3313// A vector of pointers must have the same number of elements. 3314if (SrcIsVec && DstIsVec)
3315return SrcEC == DstEC;
3323case Instruction::AddrSpaceCast: {
3335return SrcEC == DstEC;
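// Illustrative sketch (not part of the original file): how the BitCast and
// AddrSpaceCast rules above play out for a few type pairs (Ctx assumed).
//
//   Type *V4I32 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
//   Type *V2I64 = FixedVectorType::get(Type::getInt64Ty(Ctx), 2);
//   // Same total width (128 bits), neither side is a pointer: valid bitcast.
//   assert(CastInst::castIsValid(Instruction::BitCast, V4I32, V2I64));
//
//   Type *P0 = PointerType::get(Ctx, /*AddressSpace=*/0);
//   Type *P1 = PointerType::get(Ctx, /*AddressSpace=*/1);
//   // Pointers in different address spaces need addrspacecast, not bitcast.
//   assert(!CastInst::castIsValid(Instruction::BitCast, P0, P1));
//   assert(CastInst::castIsValid(Instruction::AddrSpaceCast, P0, P1));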
//===----------------------------------------------------------------------===//
//                               CmpInst Classes
//===----------------------------------------------------------------------===//

CmpInst *CmpInst::Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2,
                         const Twine &Name, InsertPosition InsertBefore) {
  if (Op == Instruction::ICmp) {
    if (InsertBefore.isValid())
      return new ICmpInst(InsertBefore, CmpInst::Predicate(Pred), S1, S2,
                          Name);
    else
      return new ICmpInst(CmpInst::Predicate(Pred), S1, S2, Name);
  }

  if (InsertBefore.isValid())
    return new FCmpInst(InsertBefore, CmpInst::Predicate(Pred), S1, S2, Name);
  else
    return new FCmpInst(CmpInst::Predicate(Pred), S1, S2, Name);
}

void CmpInst::swapOperands() {
  if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
    IC->swapOperands();
  else
    cast<FCmpInst>(this)->swapOperands();
}

bool CmpInst::isCommutative() const {
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
    return IC->isCommutative();
  return cast<FCmpInst>(this)->isCommutative();
}
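// Illustrative sketch (not part of the original file): the dispatch above
// preserves semantics because the subclass swapOperands() also swaps the
// predicate (ICI assumed to point at "icmp sgt %a, %b").
//
//   CmpInst *CI = ICI;           // icmp sgt %a, %b
//   CI->swapOperands();          // now: icmp slt %b, %a -- same result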
// Returns true if either operand of CmpInst is a provably non-zero
// floating-point constant.
static bool hasNonZeroFPOperands(const CmpInst *Cmp) {
  auto *LHS = dyn_cast<Constant>(Cmp->getOperand(0));
  auto *RHS = dyn_cast<Constant>(Cmp->getOperand(1));
  if (auto *Const = LHS ? LHS : RHS) {
    using namespace llvm::PatternMatch;
    return match(Const, m_NonZeroNotDenormalFP());
  }
  return false;
}

// Floating-point equality is not an equivalence when comparing +0.0 with
// -0.0, when comparing NaN with another value, or when flushing
// denormals-to-zero.
bool CmpInst::isEquivalence(bool Invert) const {
  switch (Invert ? getInversePredicate() : getPredicate()) {
  case CmpInst::Predicate::ICMP_EQ:
    return true;
  case CmpInst::Predicate::FCMP_UEQ:
    if (!hasNoNaNs())
      return false;
    [[fallthrough]];
  case CmpInst::Predicate::FCMP_OEQ:
    return hasNonZeroFPOperands(this);
  default:
    return false;
  }
}

StringRef CmpInst::getPredicateName(Predicate Pred) {
  switch (Pred) {
  default:                   return "unknown";
  case FCmpInst::FCMP_FALSE: return "false";
  case FCmpInst::FCMP_OEQ:   return "oeq";
  case FCmpInst::FCMP_OGT:   return "ogt";
  case FCmpInst::FCMP_OGE:   return "oge";
  case FCmpInst::FCMP_OLT:   return "olt";
  case FCmpInst::FCMP_OLE:   return "ole";
  case FCmpInst::FCMP_ONE:   return "one";
  case FCmpInst::FCMP_ORD:   return "ord";
  case FCmpInst::FCMP_UNO:   return "uno";
  case FCmpInst::FCMP_UEQ:   return "ueq";
  case FCmpInst::FCMP_UGT:   return "ugt";
  case FCmpInst::FCMP_UGE:   return "uge";
  case FCmpInst::FCMP_ULT:   return "ult";
  case FCmpInst::FCMP_ULE:   return "ule";
  case FCmpInst::FCMP_UNE:   return "une";
  case FCmpInst::FCMP_TRUE:  return "true";
  case ICmpInst::ICMP_EQ:    return "eq";
  case ICmpInst::ICMP_NE:    return "ne";
  case ICmpInst::ICMP_SGT:   return "sgt";
  case ICmpInst::ICMP_SGE:   return "sge";
  case ICmpInst::ICMP_SLT:   return "slt";
  case ICmpInst::ICMP_SLE:   return "sle";
  case ICmpInst::ICMP_UGT:   return "ugt";
  case ICmpInst::ICMP_UGE:   return "uge";
  case ICmpInst::ICMP_ULT:   return "ult";
  case ICmpInst::ICMP_ULE:   return "ule";
  }
}

static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2) {
  // If the predicates match, then we know the first condition implies the
  // second is true.
  if (CmpPredicate::getMatching(Pred1, Pred2))
    return true;

  switch (Pred1) {
  default:
    break;
  case CmpInst::ICMP_EQ:
    // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
    return Pred2 == CmpInst::ICMP_UGE || Pred2 == CmpInst::ICMP_ULE ||
           Pred2 == CmpInst::ICMP_SGE || Pred2 == CmpInst::ICMP_SLE;
  case CmpInst::ICMP_UGT: // A >u B implies A != B and A >=u B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_UGE;
  case CmpInst::ICMP_ULT: // A <u B implies A != B and A <=u B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_ULE;
  case CmpInst::ICMP_SGT: // A >s B implies A != B and A >=s B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SGE;
  case CmpInst::ICMP_SLT: // A <s B implies A != B and A <=s B are true.
    return Pred2 == CmpInst::ICMP_NE || Pred2 == CmpInst::ICMP_SLE;
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                        CmpPredicate Implementation
//===----------------------------------------------------------------------===//

CmpPredicate CmpPredicate::get(const CmpInst *Cmp) {
  if (auto *ICI = dyn_cast<ICmpInst>(Cmp))
    return ICI->getCmpPredicate();
  return Cmp->getPredicate();
}
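// Illustrative sketch (not part of the original file): CmpPredicate::get
// preserves the icmp-only samesign information that a plain getPredicate()
// call drops (Cmp assumed to point at "icmp samesign ult %x, %y").
//
//   const CmpInst *Cmp = ...;
//   CmpPredicate P = CmpPredicate::get(Cmp);
//   if (P.hasSameSign())
//     ; // both operands are known to share their sign bit here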
//===----------------------------------------------------------------------===//
//                        SwitchInst Implementation
//===----------------------------------------------------------------------===//

void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
  assert(Value && Default && NumReserved);
  ReservedSpace = NumReserved;
  setNumHungOffUseOperands(2);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Value;
  Op<1>() = Default;
}

/// SwitchInst ctor - Create a new switch instruction, specifying a value to
/// switch on and a default destination. The number of additional cases can
/// be specified here to make memory allocation more efficient. This
/// constructor can also autoinsert before another instruction.
SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
                       InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
                  AllocMarker, InsertBefore) {
  init(Value, Default, 2 + NumCases * 2);
}

SwitchInst::SwitchInst(const SwitchInst &SI)
    : Instruction(SI.getType(), Instruction::Switch, AllocMarker) {
  init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
  setNumHungOffUseOperands(SI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = SI.getOperandList();
  for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
    OL[i] = InOL[i];
    OL[i+1] = InOL[i+1];
  }
  SubclassOptionalData = SI.SubclassOptionalData;
}
/// addCase - Add an entry to the switch instruction...
void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
  unsigned NewCaseIdx = getNumCases();
  unsigned OpNo = getNumOperands();
  if (OpNo+2 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+2);
  CaseHandle Case(this, NewCaseIdx);
  Case.setValue(OnVal);
  Case.setSuccessor(Dest);
}

/// removeCase - This method removes the specified case and its successor
/// from the switch instruction.
SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
  unsigned idx = I->getCaseIndex();

  assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Overwrite this case with the end of the list.
  if (2 + (idx + 1) * 2 != NumOps) {
    OL[2 + idx * 2] = OL[NumOps - 2];
    OL[2 + idx * 2 + 1] = OL[NumOps - 1];
  }

  // Nuke the last value.
  OL[NumOps-2].set(nullptr);
  OL[NumOps-2+1].set(nullptr);
  setNumHungOffUseOperands(NumOps-2);

  return CaseIt(this, idx);
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 3 times.
void SwitchInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*3;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}
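// Illustrative sketch (not part of the original file): the hung-off operand
// list laid out by init()/addCase() is [Cond, DefaultDest, Val0, Succ0,
// Val1, Succ1, ...], which is why removeCase() above swaps the last
// (value, successor) pair into the deleted slot. Cond, Default, I32Ty,
// BB1 and BB2 are assumed placeholders.
//
//   SwitchInst *SI = SwitchInst::Create(Cond, Default, /*NumCases=*/2);
//   SI->addCase(ConstantInt::get(I32Ty, 1), BB1);   // operands 2,3
//   SI->addCase(ConstantInt::get(I32Ty, 2), BB2);   // operands 4,5
//   SI->removeCase(SI->case_begin());               // BB2's pair moves down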
MDNode *SwitchInstProfUpdateWrapper::buildProfBranchWeightsMD() {
  assert(Changed && "called only if metadata has changed");

  if (!Weights)
    return nullptr;

  assert(SI.getNumSuccessors() == Weights->size() &&
         "num of prof branch_weights must accord with num of successors");

  bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });

  if (AllZeroes || Weights->size() < 2)
    return nullptr;

  return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
}

void SwitchInstProfUpdateWrapper::init() {
  MDNode *ProfileData = getBranchWeightMDNode(SI);
  if (!ProfileData)
    return;

  if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
    llvm_unreachable("number of prof branch_weights metadata operands does "
                     "not correspond to number of successors");
  }

  SmallVector<uint32_t, 8> Weights;
  if (!extractBranchWeights(ProfileData, Weights))
    return;
  this->Weights = std::move(Weights);
}

SwitchInst::CaseIt
SwitchInstProfUpdateWrapper::removeCase(SwitchInst::CaseIt I) {
  if (Weights) {
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
    Changed = true;
    // Copy the last case to the place of the removed one and shrink.
    // This is tightly coupled with the way SwitchInst::removeCase() removes
    // the cases in SwitchInst::removeCase(CaseIt).
    (*Weights)[I->getCaseIndex() + 1] = Weights->back();
    Weights->pop_back();
  }
  return SI.removeCase(I);
}

void SwitchInstProfUpdateWrapper::addCase(
    ConstantInt *OnVal, BasicBlock *Dest,
    SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  SI.addCase(OnVal, Dest);

  if (!Weights && W && *W) {
    Changed = true;
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
    (*Weights)[SI.getNumSuccessors() - 1] = *W;
  } else if (Weights) {
    Changed = true;
    Weights->push_back(W.value_or(0));
  }
  if (Weights)
    assert(SI.getNumSuccessors() == Weights->size() &&
           "num of prof branch_weights must accord with num of successors");
}

Instruction::InstListType::iterator
SwitchInstProfUpdateWrapper::eraseFromParent() {
  // Instruction is erased. Mark as unchanged to not touch it in the destructor.
  Changed = false;
  if (Weights)
    Weights->clear();
  return SI.eraseFromParent();
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(unsigned idx) {
  if (!Weights)
    return std::nullopt;
  return (*Weights)[idx];
}

void SwitchInstProfUpdateWrapper::setSuccessorWeight(
    unsigned idx, SwitchInstProfUpdateWrapper::CaseWeightOpt W) {
  if (!W)
    return;

  if (!Weights && *W)
    Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);

  if (Weights) {
    auto &OldW = (*Weights)[idx];
    if (*W != OldW) {
      Changed = true;
      OldW = *W;
    }
  }
}

SwitchInstProfUpdateWrapper::CaseWeightOpt
SwitchInstProfUpdateWrapper::getSuccessorWeight(const SwitchInst &SI,
                                                unsigned idx) {
  if (MDNode *ProfileData = getBranchWeightMDNode(SI))
    if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
      return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
          ->getValue()
          .getZExtValue();

  return std::nullopt;
}
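// Illustrative sketch (not part of the original file): the wrapper keeps
// !prof branch_weights consistent while cases are edited, and re-emits the
// metadata when it is destroyed. SI, I32Ty, and NewBB are assumed.
//
//   {
//     SwitchInstProfUpdateWrapper SW(*SI);
//     SW.addCase(ConstantInt::get(I32Ty, 7), NewBB, /*W=*/std::nullopt);
//     SW.removeCase(SwitchInst::CaseIt(SI, /*CaseNum=*/0));
//   } // destructor writes back branch_weights if anything changed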
//===----------------------------------------------------------------------===//
//                        IndirectBrInst Implementation
//===----------------------------------------------------------------------===//

void IndirectBrInst::init(Value *Address, unsigned NumDests) {
  assert(Address && Address->getType()->isPointerTy() &&
         "Address of indirectbr must be a pointer");
  ReservedSpace = 1+NumDests;
  setNumHungOffUseOperands(1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = Address;
}

/// growOperands - grow operands - This grows the operand list in response
/// to a push_back style of operation. This grows the number of ops by 2 times.
void IndirectBrInst::growOperands() {
  unsigned e = getNumOperands();
  unsigned NumOps = e*2;

  ReservedSpace = NumOps;
  growHungoffUses(ReservedSpace);
}

IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
                               InsertPosition InsertBefore)
    : Instruction(Type::getVoidTy(Address->getContext()),
                  Instruction::IndirectBr, AllocMarker, InsertBefore) {
  init(Address, NumCases);
}

IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
    : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
                  AllocMarker) {
  allocHungoffUses(IBI.getNumOperands());
  setNumHungOffUseOperands(IBI.getNumOperands());
  Use *OL = getOperandList();
  const Use *InOL = IBI.getOperandList();
  for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
    OL[i] = InOL[i];
  SubclassOptionalData = IBI.SubclassOptionalData;
}

/// addDestination - Add a destination.
void IndirectBrInst::addDestination(BasicBlock *DestBB) {
  unsigned OpNo = getNumOperands();
  if (OpNo+1 > ReservedSpace)
    growOperands();  // Get more space!
  // Initialize some new operands.
  assert(OpNo < ReservedSpace && "Growing didn't work!");
  setNumHungOffUseOperands(OpNo+1);
  getOperandList()[OpNo] = DestBB;
}

/// removeDestination - This method removes the specified successor from the
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
  assert(idx < getNumOperands()-1 && "Successor index out of range!");

  unsigned NumOps = getNumOperands();
  Use *OL = getOperandList();

  // Replace this value with the last one.
  OL[idx+1] = OL[NumOps-1];

  // Nuke the last value.
  OL[NumOps-1].set(nullptr);
  setNumHungOffUseOperands(NumOps-1);
}
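// Illustrative sketch (not part of the original file): a typical indirectbr
// is built from a blockaddress constant; every block the address might name
// must be listed as a destination. F and Target are assumed placeholders.
//
//   Value *Addr = BlockAddress::get(F, Target);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1);
//   IBI->addDestination(Target);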
//===----------------------------------------------------------------------===//
//                           FreezeInst Implementation
//===----------------------------------------------------------------------===//

FreezeInst::FreezeInst(Value *S, const Twine &Name, InsertPosition InsertBefore)
    : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
  setName(Name);
}

//===----------------------------------------------------------------------===//
//                        cloneImpl() implementations
//===----------------------------------------------------------------------===//

// Define these methods here so vtables don't get emitted into every translation
// unit that uses these classes.

AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
  AtomicCmpXchgInst *Result = new AtomicCmpXchgInst(
      getOperand(0), getOperand(1), getOperand(2), getAlign(),
      getSuccessOrdering(), getFailureOrdering(), getSyncScopeID());
  Result->setVolatile(isVolatile());
  Result->setWeak(isWeak());
  return Result;
}
CallInst *CallInst::cloneImpl() const {
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallInst(*this, AllocMarker);
}

ReturnInst *ReturnInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) ReturnInst(*this, AllocMarker);
}

BranchInst *BranchInst::cloneImpl() const {
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) BranchInst(*this, AllocMarker);
}

InvokeInst *InvokeInst::cloneImpl() const {
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) InvokeInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) InvokeInst(*this, AllocMarker);
}

CallBrInst *CallBrInst::cloneImpl() const {
  if (hasOperandBundles()) {
    IntrusiveOperandsAndDescriptorAllocMarker AllocMarker{
        getNumOperands(),
        getNumOperandBundles() * unsigned(sizeof(BundleOpInfo))};
    return new (AllocMarker) CallBrInst(*this, AllocMarker);
  }
  IntrusiveOperandsAllocMarker AllocMarker{getNumOperands()};
  return new (AllocMarker) CallBrInst(*this, AllocMarker);
}
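// Illustrative sketch (not part of the original file): cloneImpl() is reached
// through Instruction::clone(), which copies operands, flags, and metadata
// but leaves the copy unparented and unnamed. I and InsertPt are assumed.
//
//   Instruction *Copy = I->clone();
//   Copy->insertBefore(InsertPt);   // caller decides where the clone lives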
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static bool isSigned(unsigned int Opcode)
Module.h This file contains the declarations for the Module class.
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos)
static bool isImpliedFalseByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, InsertPosition Pos)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static bool hasNonZeroFPOperands(const CmpInst *Cmp)
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static bool isImpliedTrueByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
This file contains the declarations for metadata subclasses.
uint64_t IntrinsicInst * II
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getNumElements(Type *Ty)
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
float convertToFloat() const
Converts this APFloat to host float value.
Class for arbitrary precision integers.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AllocaInst * cloneImpl() const
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
unsigned getAddressSpace() const
Return the address space for the allocation.
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
const Value * getArraySize() const
Get the number of elements allocated.
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, InsertPosition InsertBefore)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Class to represent array types.
An instruction that atomically checks whether a specified value is in a memory location,...
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
void setAlignment(Align Align)
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
an instruction that atomically reads a memory location, combines it with another value,...
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
void setOperation(BinOp Operation)
BinOp getOperation() const
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
void setAlignment(Align Align)
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
const ConstantRange & getRange() const
Returns the value of the range attribute.
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
bool isValid() const
Return true if the attribute is any kind of attribute.
LLVM Basic Block Representation.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const DataLayout & getDataLayout() const
Get the data layout of the module this basic block belongs to.
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
BinaryOps getOpcode() const
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
void setCallingConv(CallingConv::ID CC)
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
void setOnlyReadsMemory()
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
Value * getCalledOperand() const
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, InsertPosition InsertPt=nullptr)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, InsertPosition InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memmory only using pointers based on its arguments.
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This is the base class for all instructions that perform data casts.
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast or an AddrSpaceCast cast instruction.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a Trunc or BitCast cast instruction.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a BitCast, a PtrToInt, or an IntToPTr cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Create a SExt or BitCast cast instruction.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
bool isEquality() const
Determine if this is an equals/not equals predicate.
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
bool isEquivalence(bool Invert=false) const
Determine if one operand of this compare can always be replaced by the other operand,...
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isNonStrictPredicate() const
bool isFPPredicate() const
void swapOperands()
This is just a convenience that dispatches to the subclasses.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
bool isIntPredicate() const
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name="", InsertPosition InsertBefore=nullptr, Instruction *FlagsSource=nullptr)
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static std::optional< CmpPredicate > getMatching(CmpPredicate A, CmpPredicate B)
Compares two CmpPredicates taking samesign into account and returns the canonicalized CmpPredicate if...
CmpPredicate()
Default constructor.
static CmpPredicate get(const CmpInst *Cmp)
Do a ICmpInst::getCmpPredicate() or CmpInst::getPredicate(), as appropriate.
CmpInst::Predicate getPreferredSignedPredicate() const
Attempts to return a signed CmpInst::Predicate from the CmpPredicate.
bool hasSameSign() const
Query samesign information, for optimizations.
static CmpPredicate getSwapped(CmpPredicate P)
Get the swapped predicate of a CmpPredicate.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
This is the shared class of boolean and integer constants.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getAllOnesValue(Type *Ty)
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
static constexpr ElementCount getFixed(ScalarTy MinVal)
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
This class represents a cast from floating point to unsigned integer.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System, InsertPosition InsertBefore=nullptr)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Value * getParentPad() const
Convenience accessors.
FuncletPadInst * cloneImpl() const
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Represents flags for the getelementptr instruction/expression.
static GEPNoWrapFlags inBounds()
GEPNoWrapFlags withoutInBounds() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
bool hasNoUnsignedSignedWrap() const
Determine whether the GEP has the nusw flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasNoUnsignedWrap() const
Determine whether the GEP has the nuw flag.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
bool collectOffset(const DataLayout &DL, unsigned BitWidth, SmallMapVector< Value *, APInt, 4 > &VariableOffsets, APInt &ConstantOffset) const
void setNoWrapFlags(GEPNoWrapFlags NW)
Set nowrap flags for GEP instruction.
GEPNoWrapFlags getNoWrapFlags() const
Get the nowrap flags for the GEP instruction.
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
CmpPredicate getInverseCmpPredicate() const
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static std::optional< bool > isImpliedByMatchingCmp(CmpPredicate Pred1, CmpPredicate Pred2)
Determine if Pred1 implies Pred2 is true, false, or if nothing can be inferred about the implication,...
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
BasicBlock * getBasicBlock()
This instruction inserts a struct field of array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
This class represents a cast from an integer to a pointer.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, InsertPosition InsertBefore=nullptr)
This is an important class for using LLVM in a threaded context.
LLVMContextImpl *const pImpl
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
An instruction for reading from memory.
void setAlignment(Align Align)
bool isVolatile() const
Return true if this is a load from a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
void setVolatile(bool V)
Specify whether this is a volatile load or not.
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, InsertPosition InsertBefore)
Align getAlign() const
Return the alignment of the access that is being performed.
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight, bool IsExpected=false)
Return metadata containing two branch weights.
const MDOperand & getOperand(unsigned I) const
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
bool doesNotAccessMemory() const
Whether this function accesses no memory.
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
unsigned getAddressSpace() const
Return the address space of the Pointer type.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
SExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
void setVolatile(bool V)
Specify whether this is a volatile store or not.
void setAlignment(Align Align)
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, InsertPosition InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
bool isVolatile() const
Return true if this is a store to a volatile memory location.
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
StringRef - Represent a constant reference to a string, i.e.
Class to represent struct types.
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
MDNode * buildProfBranchWeightsMD()
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize get(ScalarTy Quantity, bool Scalable)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
bool isAggregateType() const
Return true if the type is an aggregate type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a unary instruction, given the opcode and an operand.
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, InsertPosition InsertBefore)
UnaryOperator * cloneImpl() const
UnaryOps getOpcode() const
This function has undefined behavior.
UnreachableInst(LLVMContext &C, InsertPosition InsertBefore=nullptr)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
const Use * getOperandList() const
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
void setName(const Twine &Name)
Change the name of the value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
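A sketch of the core Value API, assuming Value *V and a same-typed Value *Replacement:
V->setName("tmp");                  // names are optional in IR; renaming is cheap
StringRef N = V->getName();
Type *Ty = V->getType();
V->replaceAllUsesWith(Replacement); // rewrites every Use of V; types must match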
Base class of all SIMD vector types.
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct a VectorType.
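For instance, assuming an LLVMContext Ctx:
Type *F32 = Type::getFloatTy(Ctx);
auto *V4F32 = VectorType::get(F32, ElementCount::getFixed(4));      // <4 x float>
auto *NXV2F32 = VectorType::get(F32, ElementCount::getScalable(2)); // <vscale x 2 x float>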
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
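The cast-instruction constructors above (TruncInst, ZExtInst, UIToFPInst) all share the same shape; a sketch, assuming an integer-typed Value *V, an LLVMContext Ctx, and an insertion point InsertPt:
auto *Lo   = new TruncInst(V, Type::getInt8Ty(Ctx), "lo", InsertPt);
auto *Wide = new ZExtInst(V, Type::getInt64Ty(Ctx), "wide", InsertPt);
auto *FP   = new UIToFPInst(V, Type::getDoubleTy(Ctx), "fp", InsertPt);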
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
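These insert/contains briefs match LLVM's set containers such as SmallPtrSet; a common visited-set idiom:
SmallPtrSet<Value *, 8> Seen;
if (Seen.insert(V).second) {
  // V was not in the set before; process it exactly once.
}
bool Known = Seen.contains(V);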
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
An efficient, type-erasing, non-owning reference to a callable.
const ParentTy * getParent() const
base_list_type::iterator iterator
This class implements an extremely fast bulk output stream that can only output to a stream.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
bool match(Val *V, const Pattern &P)
cstfp_pred_ty< is_non_zero_not_denormal_fp > m_NonZeroNotDenormalFP()
Match a floating-point non-zero that is not a denormal.
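A sketch of the PatternMatch style these entries come from (Value *V is assumed):
using namespace llvm::PatternMatch;
if (match(V, m_NonZeroNotDenormalFP())) {
  // V is a floating-point constant that is neither zero nor a denormal.
}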
initializer< Ty > init(const Ty &Val)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
unsigned getPointerAddressSpace(const Type *T)
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
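These range helpers compose naturally; an illustrative sketch (all in namespace llvm, from llvm/ADT/STLExtras.h):
SmallVector<int, 4> Xs = {1, 2, 3, 4};
// Test a property on everything but the first element.
bool RestPositive = all_of(drop_begin(Xs), [](int X) { return X > 0; });
auto R = make_range(Xs.begin(), Xs.end());
size_t N = size(R); // O(1) for random-access ranges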
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
std::enable_if_t< std::is_unsigned_v< T >, std::optional< T > > checkedMulUnsigned(T LHS, T RHS)
Multiply two unsigned integers LHS and RHS.
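As the signature shows, the result is an optional; a sketch with assumed unsigned inputs A and B (from llvm/Support/CheckedArithmetic.h):
if (std::optional<uint64_t> Prod = checkedMulUnsigned<uint64_t>(A, B)) {
  // Multiplication did not overflow; *Prod is safe to use.
}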
auto reverse(ContainerTy &&C)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
constexpr int PoisonMaskElem
unsigned getNumBranchWeights(const MDNode &ProfileData)
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
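Combined with erase(), this gives the usual erase/remove idiom over a whole container:
// Assuming a populated SmallVector<Instruction *, 8> Worklist:
Worklist.erase(remove_if(Worklist, [](Instruction *I) { return I->use_empty(); }),
               Worklist.end());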
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
OutputIt copy(R &&Range, OutputIt Out)
constexpr unsigned BitWidth
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
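A sketch of reading branch weights off an instruction (Instruction &Br is assumed to be a conditional branch):
if (MDNode *Prof = getBranchWeightMDNode(Br)) {
  SmallVector<uint32_t, 2> Weights;
  if (extractBranchWeights(Prof, Weights)) {
    // For a condbr, Weights[0]/Weights[1] are the taken/fall-through weights.
  }
}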
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
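isa<> and cast<> split checking from converting; a sketch, with Value *V assumed:
if (isa<Instruction>(V)) {
  auto *I = cast<Instruction>(V); // cast<> asserts instead of re-checking
  (void)I;
}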
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
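A small sketch of these helpers (Xs as in the range sketch above; A, B, C are assumed comparable values):
for (int I : seq(0, 4)) {
  // Visits 0, 1, 2, 3; the end bound is exclusive.
}
bool HasThree = is_contained(Xs, 3);
bool AllSame = all_equal({A, B, C});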
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scale the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Summary of memprof metadata on allocations.
Describes an element of a Bitfield.
Used to keep track of an operand bundle.
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Incoming for lane mask phi as machine instruction: incoming register Reg and incoming block Block are used; the parent block is not used.
static std::optional< bool > eq(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_EQ result.
static std::optional< bool > ne(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_NE result.
static std::optional< bool > sge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGE result.
static std::optional< bool > ugt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGT result.
static std::optional< bool > slt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLT result.
static std::optional< bool > ult(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULT result.
static std::optional< bool > ule(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_ULE result.
static std::optional< bool > sle(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SLE result.
static std::optional< bool > sgt(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_SGT result.
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
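These helpers fold comparisons of partially-known values; with fully-known inputs they return a definite bool:
KnownBits A = KnownBits::makeConstant(APInt(8, 5));
KnownBits B = KnownBits::makeConstant(APInt(8, 7));
std::optional<bool> Eq  = KnownBits::eq(A, B);  // false
std::optional<bool> Ult = KnownBits::ult(A, B); // true
// With unknown bits, any of these can return std::nullopt instead.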
A MapVector that performs no allocations if smaller than a certain size.
Indicates this User has operands co-allocated.
Indicates this User has operands and a descriptor co-allocated.