LLVM 20.0.0git
#include "Target/NVPTX/NVPTXISelLowering.h"
Public Member Functions
NVPTXTargetLowering (const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI) | |
SDValue | LowerOperation (SDValue Op, SelectionDAG &DAG) const override |
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal. | |
SDValue | LowerGlobalAddress (SDValue Op, SelectionDAG &DAG) const |
const char * | getTargetNodeName (unsigned Opcode) const override |
This method returns the name of a target specific DAG node. | |
bool | getTgtMemIntrinsic (IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override |
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory). | |
Align | getFunctionArgumentAlignment (const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const |
Align | getFunctionParamOptimizedAlign (const Function *F, Type *ArgTy, const DataLayout &DL) const |
getFunctionParamOptimizedAlign - since function arguments are passed via .param space, we may want to increase their alignment in a way that ensures that we can effectively vectorize their loads & stores. | |
Align | getFunctionByValParamAlign (const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const |
Helper for computing alignment of a device function byval parameter. | |
std::string | getParamName (const Function *F, int Idx) const |
bool | isLegalAddressingMode (const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override |
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type. Used to guide target specific optimizations, like loop strength reduction (LoopStrengthReduce.cpp) and memory optimization for address mode (CodeGenPrepare.cpp). | |
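The addressing-mode query is easiest to picture with a small standalone model. Below is a minimal sketch, not NVPTX's actual implementation: `AddrModeSketch` is a simplified stand-in for LLVM's `TargetLoweringBase::AddrMode` (the field names mirror the real struct), and the acceptance policy shown is a hypothetical one for a target that supports only `[reg]`, `[reg+imm]`, and plain `[symbol]` forms.

```cpp
#include <cassert>
#include <cstdint>

// Simplified stand-in for TargetLoweringBase::AddrMode (illustrative only).
struct AddrModeSketch {
  const void *BaseGV = nullptr; // base global symbol, if any
  int64_t BaseOffs = 0;         // constant offset
  bool HasBaseReg = false;      // base register present
  int64_t Scale = 0;            // scaled index register factor
};

// Hypothetical policy: accept [reg], [reg+imm], and [symbol]; reject any
// scaled-index form, since PTX-style addressing has no reg*scale component.
bool isLegalAddressingModeSketch(const AddrModeSketch &AM) {
  if (AM.Scale != 0)
    return false;                              // no scaled index register
  if (AM.BaseGV)
    return !AM.HasBaseReg && AM.BaseOffs == 0; // plain symbol only
  return true;                                 // [reg] or [reg+imm]
}

int main() {
  AddrModeSketch RegPlusImm;
  RegPlusImm.HasBaseReg = true;
  RegPlusImm.BaseOffs = 16;
  assert(isLegalAddressingModeSketch(RegPlusImm)); // [reg+16] accepted

  AddrModeSketch Scaled;
  Scaled.HasBaseReg = true;
  Scaled.Scale = 4;                               // would need reg + reg*4
  assert(!isLegalAddressingModeSketch(Scaled));   // rejected
}
```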
bool | isTruncateFree (Type *SrcTy, Type *DstTy) const override |
Return true if it's free to truncate a value of type FromTy to type ToTy. | |
EVT | getSetCCResultType (const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override |
Return the ValueType of the result of SETCC operations. | |
ConstraintType | getConstraintType (StringRef Constraint) const override |
getConstraintType - Given a constraint letter, return the type of constraint it is for this target. | |
std::pair< unsigned, const TargetRegisterClass * > | getRegForInlineAsmConstraint (const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override |
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register. | |
SDValue | LowerFormalArguments (SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override |
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG. | |
SDValue | LowerCall (CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override |
This hook must be implemented to lower calls into the specified DAG. | |
SDValue | LowerDYNAMIC_STACKALLOC (SDValue Op, SelectionDAG &DAG) const |
SDValue | LowerSTACKSAVE (SDValue Op, SelectionDAG &DAG) const |
SDValue | LowerSTACKRESTORE (SDValue Op, SelectionDAG &DAG) const |
std::string | getPrototype (const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, MaybeAlign retAlignment, std::optional< std::pair< unsigned, const APInt & > > VAInfo, const CallBase &CB, unsigned UniqueCallSite) const |
SDValue | LowerReturn (SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override |
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG. | |
void | LowerAsmOperandForConstraint (SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override |
Lower the specified operand into the Ops vector. | |
MVT | getScalarShiftAmountTy (const DataLayout &, EVT) const override |
Return the type to use for a scalar shift opcode, given the shifted amount type. | |
TargetLoweringBase::LegalizeTypeAction | getPreferredVectorAction (MVT VT) const override |
Return the preferred vector type legalization action. | |
int | getDivF32Level () const |
bool | usePrecSqrtF32 () const |
bool | useF32FTZ (const MachineFunction &MF) const |
SDValue | getSqrtEstimate (SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override |
Hooks for building estimates in place of slower divisions and square roots. | |
unsigned | combineRepeatedFPDivisors () const override |
Indicate whether this target prefers to combine FDIVs with the same divisor. | |
bool | allowFMA (MachineFunction &MF, CodeGenOptLevel OptLevel) const |
bool | allowUnsafeFPMath (MachineFunction &MF) const |
bool | isFMAFasterThanFMulAndFAdd (const MachineFunction &MF, EVT) const override |
Return true if an FMA operation is faster than a pair of fmul and fadd instructions. | |
MVT | getJumpTableRegTy (const DataLayout &) const override |
unsigned | getJumpTableEncoding () const override |
Return the entry encoding for a jump table in the current function. | |
bool | enableAggressiveFMAFusion (EVT VT) const override |
Return true if target always benefits from combining into FMA for a given value type. | |
bool | isCheapToSpeculateCtlz (Type *Ty) const override |
Return true if it is cheap to speculate a call to intrinsic ctlz. | |
AtomicExpansionKind | shouldCastAtomicLoadInIR (LoadInst *LI) const override |
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass. | |
AtomicExpansionKind | shouldCastAtomicStoreInIR (StoreInst *SI) const override |
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass. | |
AtomicExpansionKind | shouldExpandAtomicRMWInIR (AtomicRMWInst *AI) const override |
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all. | |
bool | aggressivelyPreferBuildVectorSources (EVT VecVT) const override |
Public Member Functions inherited from llvm::TargetLowering
TargetLowering (constTargetLowering &)=delete | |
TargetLowering & | operator= (constTargetLowering &)=delete |
TargetLowering (constTargetMachine &TM) | |
NOTE: The TargetMachine owns TLOF. | |
bool | isPositionIndependent ()const |
virtualbool | isSDNodeSourceOfDivergence (constSDNode *N,FunctionLoweringInfo *FLI,UniformityInfo *UA)const |
virtualbool | isReassocProfitable (SelectionDAG &DAG,SDValue N0,SDValue N1)const |
virtualbool | isReassocProfitable (MachineRegisterInfo &MRI,Register N0,Register N1)const |
virtualbool | isSDNodeAlwaysUniform (constSDNode *N)const |
virtualbool | getPreIndexedAddressParts (SDNode *,SDValue &,SDValue &,ISD::MemIndexedMode &,SelectionDAG &)const |
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's address can be legally represented as pre-indexed load / store address. | |
virtualbool | getPostIndexedAddressParts (SDNode *,SDNode *,SDValue &,SDValue &,ISD::MemIndexedMode &,SelectionDAG &)const |
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node can be combined with a load / store to form a post-indexed load / store. | |
virtualbool | isIndexingLegal (MachineInstr &MI,RegisterBase,RegisterOffset,bool IsPre,MachineRegisterInfo &MRI)const |
Returns true if the specified base+offset is a legal indexed addressing mode for this target. | |
virtualunsigned | getJumpTableEncoding ()const |
Return the entry encoding for a jump table in the current function. | |
virtualMVT | getJumpTableRegTy (constDataLayout &DL)const |
virtualconstMCExpr * | LowerCustomJumpTableEntry (constMachineJumpTableInfo *,constMachineBasicBlock *,unsigned,MCContext &)const |
virtualSDValue | getPICJumpTableRelocBase (SDValue Table,SelectionDAG &DAG)const |
Returns relocation base for the given PIC jumptable. | |
virtualconstMCExpr * | getPICJumpTableRelocBaseExpr (constMachineFunction *MF,unsigned JTI,MCContext &Ctx)const |
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr. | |
virtualbool | isOffsetFoldingLegal (constGlobalAddressSDNode *GA)const |
Return true if folding a constant offset with the given GlobalAddress is legal. | |
virtualbool | isInlineAsmTargetBranch (constSmallVectorImpl<StringRef > &AsmStrs,unsigned OpNo)const |
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use either a memory constraint or an address constraint. | |
bool | isInTailCallPosition (SelectionDAG &DAG,SDNode *Node,SDValue &Chain)const |
Check whether a given call node is in tail position within its function. | |
void | softenSetCCOperands (SelectionDAG &DAG,EVT VT,SDValue &NewLHS,SDValue &NewRHS,ISD::CondCode &CCCode,constSDLoc &DL,constSDValue OldLHS,constSDValue OldRHS)const |
Soften the operands of a comparison. | |
void | softenSetCCOperands (SelectionDAG &DAG,EVT VT,SDValue &NewLHS,SDValue &NewRHS,ISD::CondCode &CCCode,constSDLoc &DL,constSDValue OldLHS,constSDValue OldRHS,SDValue &Chain,bool IsSignaling=false)const |
virtualSDValue | visitMaskedLoad (SelectionDAG &DAG,constSDLoc &DL,SDValue Chain,MachineMemOperand *MMO,SDValue &NewLoad,SDValuePtr,SDValue PassThru,SDValue Mask)const |
virtualSDValue | visitMaskedStore (SelectionDAG &DAG,constSDLoc &DL,SDValue Chain,MachineMemOperand *MMO,SDValuePtr,SDValue Val,SDValue Mask)const |
std::pair<SDValue,SDValue > | makeLibCall (SelectionDAG &DAG,RTLIB::Libcall LC,EVT RetVT,ArrayRef<SDValue > Ops,MakeLibCallOptions CallOptions,constSDLoc &dl,SDValue Chain=SDValue())const |
Returns a pair of (return value, chain). | |
bool | parametersInCSRMatch (constMachineRegisterInfo &MRI,constuint32_t *CallerPreservedMask,constSmallVectorImpl<CCValAssign > &ArgLocs,constSmallVectorImpl<SDValue > &OutVals)const |
Check whether parameters to a call that are passed in callee saved registers are the same as from the calling function. | |
virtualbool | findOptimalMemOpLowering (std::vector<EVT > &MemOps,unsigned Limit,constMemOp &Op,unsigned DstAS,unsigned SrcAS,constAttributeList &FuncAttributes)const |
Determines the optimal series of memory ops to replace the memset / memcpy. | |
bool | ShrinkDemandedConstant (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,TargetLoweringOpt &TLO)const |
Check to see if the specified operand of the specified instruction is a constant integer. | |
bool | ShrinkDemandedConstant (SDValueOp,constAPInt &DemandedBits,TargetLoweringOpt &TLO)const |
Helper wrapper around ShrinkDemandedConstant, demanding all elements. | |
virtualbool | targetShrinkDemandedConstant (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,TargetLoweringOpt &TLO)const |
bool | ShrinkDemandedOp (SDValueOp,unsignedBitWidth,constAPInt &DemandedBits,TargetLoweringOpt &TLO)const |
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. | |
bool | SimplifyDemandedBits (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,KnownBits &Known,TargetLoweringOpt &TLO,unsignedDepth=0,bool AssumeSingleUse=false)const |
Look at Op. At this point, we know that only the DemandedBits bits of the result of Op are ever used downstream. | |
bool | SimplifyDemandedBits (SDValueOp,constAPInt &DemandedBits,KnownBits &Known,TargetLoweringOpt &TLO,unsignedDepth=0,bool AssumeSingleUse=false)const |
Helper wrapper around SimplifyDemandedBits, demanding all elements. | |
bool | SimplifyDemandedBits (SDValueOp,constAPInt &DemandedBits,DAGCombinerInfo &DCI)const |
Helper wrapper around SimplifyDemandedBits. | |
bool | SimplifyDemandedBits (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,DAGCombinerInfo &DCI)const |
Helper wrapper around SimplifyDemandedBits. | |
SDValue | SimplifyMultipleUseDemandedBits (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,SelectionDAG &DAG,unsignedDepth=0)const |
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contribute to the DemandedBits/DemandedElts - bitwise ops etc. | |
SDValue | SimplifyMultipleUseDemandedBits (SDValueOp,constAPInt &DemandedBits,SelectionDAG &DAG,unsignedDepth=0)const |
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all elements. | |
SDValue | SimplifyMultipleUseDemandedVectorElts (SDValueOp,constAPInt &DemandedElts,SelectionDAG &DAG,unsignedDepth=0)const |
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector elements. | |
bool | SimplifyDemandedVectorElts (SDValueOp,constAPInt &DemandedEltMask,APInt &KnownUndef,APInt &KnownZero,TargetLoweringOpt &TLO,unsignedDepth=0,bool AssumeSingleUse=false)const |
Look at Vector Op. At this point, we know that only the DemandedElts elements of the result of Op are ever used downstream. | |
bool | SimplifyDemandedVectorElts (SDValueOp,constAPInt &DemandedElts,DAGCombinerInfo &DCI)const |
Helper wrapper around SimplifyDemandedVectorElts. | |
virtualbool | shouldSimplifyDemandedVectorElts (SDValueOp,constTargetLoweringOpt &TLO)const |
Return true if the target supports simplifying demanded vector elements by converting them to undefs. | |
virtual void | computeKnownBitsForTargetNode (constSDValueOp,KnownBits &Known,constAPInt &DemandedElts,constSelectionDAG &DAG,unsignedDepth=0)const |
Determine which of the bits specified in Mask are known to be either zero or one and return them in the KnownZero/KnownOne bitsets. | |
virtual void | computeKnownBitsForTargetInstr (GISelKnownBits &Analysis,Register R,KnownBits &Known,constAPInt &DemandedElts,constMachineRegisterInfo &MRI,unsignedDepth=0)const |
Determine which of the bits specified in Mask are known to be either zero or one and return them in the KnownZero/KnownOne bitsets. | |
virtualAlign | computeKnownAlignForTargetInstr (GISelKnownBits &Analysis,Register R,constMachineRegisterInfo &MRI,unsignedDepth=0)const |
Determine the known alignment for the pointer value R. | |
virtual void | computeKnownBitsForFrameIndex (int FIOp,KnownBits &Known,constMachineFunction &MF)const |
Determine which of the bits of FrameIndex FIOp are known to be 0. | |
virtualunsigned | ComputeNumSignBitsForTargetNode (SDValueOp,constAPInt &DemandedElts,constSelectionDAG &DAG,unsignedDepth=0)const |
This method can be implemented by targets that want to expose additional information about sign bits to the DAGCombiner. | |
virtualunsigned | computeNumSignBitsForTargetInstr (GISelKnownBits &Analysis,Register R,constAPInt &DemandedElts,constMachineRegisterInfo &MRI,unsignedDepth=0)const |
This method can be implemented by targets that want to expose additional information about sign bits to GlobalISel combiners. | |
virtualbool | SimplifyDemandedVectorEltsForTargetNode (SDValueOp,constAPInt &DemandedElts,APInt &KnownUndef,APInt &KnownZero,TargetLoweringOpt &TLO,unsignedDepth=0)const |
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success. | |
virtualbool | SimplifyDemandedBitsForTargetNode (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,KnownBits &Known,TargetLoweringOpt &TLO,unsignedDepth=0)const |
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success. | |
virtualSDValue | SimplifyMultipleUseDemandedBitsForTargetNode (SDValueOp,constAPInt &DemandedBits,constAPInt &DemandedElts,SelectionDAG &DAG,unsignedDepth)const |
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contribute to the DemandedBits/DemandedElts - bitwise ops etc. | |
virtualbool | isGuaranteedNotToBeUndefOrPoisonForTargetNode (SDValueOp,constAPInt &DemandedElts,constSelectionDAG &DAG,boolPoisonOnly,unsignedDepth)const |
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, does not have undef bits. | |
virtualbool | canCreateUndefOrPoisonForTargetNode (SDValueOp,constAPInt &DemandedElts,constSelectionDAG &DAG,boolPoisonOnly,bool ConsiderFlags,unsignedDepth)const |
Return true if Op can create undef or poison from non-undef & non-poison operands. | |
SDValue | buildLegalVectorShuffle (EVT VT,constSDLoc &DL,SDValue N0,SDValue N1,MutableArrayRef< int > Mask,SelectionDAG &DAG)const |
Tries to build a legal vector shuffle using the provided parameters or equivalent variations. | |
virtualconstConstant * | getTargetConstantFromLoad (LoadSDNode *LD)const |
This method returns the constant pool value that will be loaded by LD. | |
virtualbool | isKnownNeverNaNForTargetNode (SDValueOp,constSelectionDAG &DAG,bool SNaN=false,unsignedDepth=0)const |
If SNaN is false, returns true if Op is known to never be any NaN; if SNaN is true, returns true if Op is known to never be a signaling NaN. | |
virtualbool | isSplatValueForTargetNode (SDValueOp,constAPInt &DemandedElts,APInt &UndefElts,constSelectionDAG &DAG,unsignedDepth=0)const |
Return true if vector Op has the same value across all DemandedElts, indicating any elements which may be undef in the output UndefElts. | |
virtualbool | isTargetCanonicalConstantNode (SDValueOp)const |
Returns true if the given Opc is considered a canonical constant for the target, which should not be transformed back into a BUILD_VECTOR. | |
bool | isConstTrueVal (SDValueN)const |
Return if the N is a constant or constant vector equal to the true value from getBooleanContents(). | |
bool | isConstFalseVal (SDValueN)const |
Return if the N is a constant or constant vector equal to the false value from getBooleanContents(). | |
bool | isExtendedTrueVal (constConstantSDNode *N,EVT VT,bool SExt)const |
Return if N is a True value when extended to VT. | |
SDValue | SimplifySetCC (EVT VT,SDValue N0,SDValue N1,ISD::CondCodeCond,bool foldBooleans,DAGCombinerInfo &DCI,constSDLoc &dl)const |
Try to simplify a setcc built with the specified operands and cc. | |
virtualSDValue | unwrapAddress (SDValueN)const |
virtualbool | isGAPlusOffset (SDNode *N,constGlobalValue *&GA, int64_t &Offset)const |
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset. | |
virtualSDValue | PerformDAGCombine (SDNode *N,DAGCombinerInfo &DCI)const |
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered with invoke it for. | |
virtualbool | isDesirableToCommuteWithShift (constSDNode *N,CombineLevel Level)const |
Return true if it is profitable to move this shift by a constant amount through its operand, adjusting any immediate operands as necessary to preserve semantics. | |
virtualbool | isDesirableToCommuteWithShift (constMachineInstr &MI,bool IsAfterLegal)const |
GlobalISel - return true if it is profitable to move this shift by a constant amount through its operand, adjusting any immediate operands as necessary to preserve semantics. | |
virtualbool | isDesirableToPullExtFromShl (constMachineInstr &MI)const |
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x), y => zext (shl x, y) | |
virtualAndOrSETCCFoldKind | isDesirableToCombineLogicOpOfSETCC (constSDNode *LogicOp,constSDNode *SETCC0,constSDNode *SETCC1)const |
virtualbool | isDesirableToCommuteXorWithShift (constSDNode *N)const |
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT. | |
virtualbool | isTypeDesirableForOp (unsigned,EVT VT)const |
Return true if the target has native support for the specified value type and it is 'desirable' to use the type for the given node type. | |
virtualbool | isDesirableToTransformToIntegerOp (unsigned,EVT)const |
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode to an equivalent op of an integer type. | |
virtualbool | IsDesirableToPromoteOp (SDValue,EVT &)const |
This method queries the target whether it is beneficial for dag combiner to promote the specified node. | |
virtualbool | supportSwiftError ()const |
Return true if the target supports swifterror attribute. | |
virtualbool | supportSplitCSR (MachineFunction *MF)const |
Return true if the target supports that a subset of CSRs for the given machine function is handled explicitly via copies. | |
virtualbool | supportKCFIBundles ()const |
Return true if the target supports kcfi operand bundles. | |
virtualbool | supportPtrAuthBundles ()const |
Return true if the target supports ptrauth operand bundles. | |
virtual void | initializeSplitCSR (MachineBasicBlock *Entry)const |
Perform necessary initialization to handle a subset of CSRs explicitly via copies. | |
virtual void | insertCopiesSplitCSR (MachineBasicBlock *Entry,constSmallVectorImpl<MachineBasicBlock * > &Exits)const |
Insert explicit copies in entry and exit blocks. | |
virtualSDValue | getNegatedExpression (SDValueOp,SelectionDAG &DAG,bool LegalOps,bool OptForSize,NegatibleCost &Cost,unsignedDepth=0)const |
Return the newly negated expression if the cost is not expensive, and set the cost in Cost to indicate whether it is cheaper or neutral to do the negation. | |
SDValue | getCheaperOrNeutralNegatedExpression (SDValueOp,SelectionDAG &DAG,bool LegalOps,bool OptForSize,constNegatibleCostCostThreshold=NegatibleCost::Neutral,unsignedDepth=0)const |
SDValue | getCheaperNegatedExpression (SDValueOp,SelectionDAG &DAG,bool LegalOps,bool OptForSize,unsignedDepth=0)const |
This is the helper function to return the newly negated expression only when the cost is cheaper. | |
SDValue | getNegatedExpression (SDValueOp,SelectionDAG &DAG,bool LegalOps,bool OptForSize,unsignedDepth=0)const |
This is the helper function to return the newly negated expression if the cost is not expensive. | |
virtualbool | splitValueIntoRegisterParts (SelectionDAG &DAG,constSDLoc &DL,SDValue Val,SDValue *Parts,unsigned NumParts,MVT PartVT, std::optional<CallingConv::ID >CC)const |
Target-specific splitting of values into parts that fit a register storing a legal type. | |
virtualbool | checkForPhysRegDependency (SDNode *Def,SDNode *User,unsignedOp,constTargetRegisterInfo *TRI,constTargetInstrInfo *TII,unsigned &PhysReg, int &Cost)const |
Allows the target to handle physreg-carried dependency in target-specific way. | |
virtualSDValue | joinRegisterPartsIntoValue (SelectionDAG &DAG,constSDLoc &DL,constSDValue *Parts,unsigned NumParts,MVT PartVT,EVT ValueVT, std::optional<CallingConv::ID >CC)const |
Target-specific combining of register parts into its original value. | |
virtualSDValue | LowerFormalArguments (SDValue,CallingConv::ID,bool,constSmallVectorImpl<ISD::InputArg > &,constSDLoc &,SelectionDAG &,SmallVectorImpl<SDValue > &)const |
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG. | |
std::pair<SDValue,SDValue > | LowerCallTo (CallLoweringInfo &CLI)const |
This function lowers an abstract call to a function into an actual call. | |
virtualSDValue | LowerCall (CallLoweringInfo &,SmallVectorImpl<SDValue > &)const |
This hook must be implemented to lower calls into the specified DAG. | |
virtual void | HandleByVal (CCState *,unsigned &,Align)const |
Target-specific cleanup for formal ByVal parameters. | |
virtualbool | CanLowerReturn (CallingConv::ID,MachineFunction &,bool,constSmallVectorImpl<ISD::OutputArg > &,LLVMContext &,constType *RetTy)const |
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers. | |
virtualSDValue | LowerReturn (SDValue,CallingConv::ID,bool,constSmallVectorImpl<ISD::OutputArg > &,constSmallVectorImpl<SDValue > &,constSDLoc &,SelectionDAG &)const |
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG. | |
virtualbool | isUsedByReturnOnly (SDNode *,SDValue &)const |
Return true if result of the specified node is used by a return node only. | |
virtualbool | mayBeEmittedAsTailCall (constCallInst *)const |
Return true if the target may be able to emit the call instruction as a tail call. | |
virtualRegister | getRegisterByName (constchar *RegName,LLT Ty,constMachineFunction &MF)const |
Return the register ID of the name passed in. | |
virtualEVT | getTypeForExtReturn (LLVMContext &Context,EVT VT,ISD::NodeType)const |
Return the type that should be used to zero or sign extend a zeroext/signext integer return value. | |
virtualbool | functionArgumentNeedsConsecutiveRegisters (Type *Ty,CallingConv::ID CallConv,bool isVarArg,constDataLayout &DL)const |
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers. | |
virtualbool | shouldSplitFunctionArgumentsAsLittleEndian (constDataLayout &DL)const |
For most targets, an LLVM type must be broken down into multiple smaller types. | |
virtualconstMCPhysReg * | getScratchRegisters (CallingConv::IDCC)const |
Returns a 0 terminated array of registers that can be safely used as scratch registers. | |
virtualArrayRef<MCPhysReg > | getRoundingControlRegisters ()const |
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call. | |
virtualSDValue | prepareVolatileOrAtomicLoad (SDValue Chain,constSDLoc &DL,SelectionDAG &DAG)const |
This callback is used to prepare for a volatile or atomic load. | |
virtual void | LowerOperationWrapper (SDNode *N,SmallVectorImpl<SDValue > &Results,SelectionDAG &DAG)const |
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types. | |
virtualSDValue | LowerOperation (SDValueOp,SelectionDAG &DAG)const |
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal. | |
virtual void | ReplaceNodeResults (SDNode *,SmallVectorImpl<SDValue > &,SelectionDAG &)const |
This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that result type. | |
virtualconstchar * | getTargetNodeName (unsigned Opcode)const |
This method returns the name of a target specific DAG node. | |
virtualFastISel * | createFastISel (FunctionLoweringInfo &,constTargetLibraryInfo *)const |
This method returns a target specific FastISel object, or null if the target does not support "fast" ISel. | |
bool | verifyReturnAddressArgumentIsConstant (SDValueOp,SelectionDAG &DAG)const |
virtual void | verifyTargetSDNode (constSDNode *N)const |
Check the given SDNode. Aborts if it is invalid. | |
virtualbool | ExpandInlineAsm (CallInst *)const |
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to. | |
virtualAsmOperandInfoVector | ParseConstraints (constDataLayout &DL,constTargetRegisterInfo *TRI,constCallBase &Call)const |
Split up the constraint string from the inline assembly value into the specific constraints and their prefixes, and also tie in the associated operand values. | |
virtualConstraintWeight | getMultipleConstraintMatchWeight (AsmOperandInfo &info, int maIndex)const |
Examine constraint type and operand type and determine a weight value. | |
virtualConstraintWeight | getSingleConstraintMatchWeight (AsmOperandInfo &info,constchar *constraint)const |
Examine constraint string and operand type and determine a weight value. | |
virtual void | ComputeConstraintToUse (AsmOperandInfo &OpInfo,SDValueOp,SelectionDAG *DAG=nullptr)const |
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. | |
virtualConstraintType | getConstraintType (StringRef Constraint)const |
Given a constraint, return the type of constraint it is for this target. | |
ConstraintGroup | getConstraintPreferences (AsmOperandInfo &OpInfo)const |
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constraint codes and their types in priority of what we'd prefer to lower them as. | |
virtual std::pair<unsigned,constTargetRegisterClass * > | getRegForInlineAsmConstraint (constTargetRegisterInfo *TRI,StringRef Constraint,MVT VT)const |
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register. | |
virtualInlineAsm::ConstraintCode | getInlineAsmMemConstraint (StringRef ConstraintCode)const |
virtualconstchar * | LowerXConstraint (EVT ConstraintVT)const |
Try to replace an X constraint, which matches anything, with another that has more specific requirements based on the type of the corresponding operand. | |
virtual void | LowerAsmOperandForConstraint (SDValueOp,StringRef Constraint, std::vector<SDValue > &Ops,SelectionDAG &DAG)const |
Lower the specified operand into the Ops vector. | |
virtualSDValue | LowerAsmOutputForConstraint (SDValue &Chain,SDValue &Glue,constSDLoc &DL,constAsmOperandInfo &OpInfo,SelectionDAG &DAG)const |
virtual void | CollectTargetIntrinsicOperands (constCallInst &I,SmallVectorImpl<SDValue > &Ops,SelectionDAG &DAG)const |
SDValue | BuildSDIV (SDNode *N,SelectionDAG &DAG,bool IsAfterLegalization,bool IsAfterLegalTypes,SmallVectorImpl<SDNode * > &Created)const |
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will generate the same value by multiplying by a magic number. | |
SDValue | BuildUDIV (SDNode *N,SelectionDAG &DAG,bool IsAfterLegalization,bool IsAfterLegalTypes,SmallVectorImpl<SDNode * > &Created)const |
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will generate the same value by multiplying by a magic number. | |
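As a concrete illustration of the magic-number technique these two helpers automate, the sketch below replaces an unsigned divide by the constant 3 with a widening multiply and a shift. The constant and shift count shown are the standard ones for a 32-bit dividend; they are given only as an example, not as output of BuildUDIV itself.

```cpp
#include <cassert>
#include <cstdint>

// Unsigned divide-by-3 via multiply-by-magic-number:
// x / 3 == (x * 0xAAAAAAAB) >> 33, with the product computed in 64 bits.
uint32_t udiv3(uint32_t x) {
  return static_cast<uint32_t>((static_cast<uint64_t>(x) * 0xAAAAAAABULL) >> 33);
}

int main() {
  for (uint32_t x = 0; x < 100000; ++x)
    assert(udiv3(x) == x / 3); // spot-check against a real division
}
```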
SDValue | buildSDIVPow2WithCMov (SDNode *N,constAPInt &Divisor,SelectionDAG &DAG,SmallVectorImpl<SDNode * > &Created)const |
Build sdiv by power-of-2 with conditional move instructions. Ref: "Hacker's Delight" by Henry Warren, 10-1. If a conditional move/branch is preferred, we lower sdiv x, +/-2**k into: bgez x, label; add x, x, 2**k-1; label: sra res, x, k; neg res, res (when the divisor is negative). | |
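A scalar model of that lowering may make the adjustment step clearer. The function below is an illustrative C++ equivalent (assuming arithmetic right shift on signed values), not the DAG code the helper emits.

```cpp
#include <cassert>
#include <cstdint>

// Signed divide by +/-2^k with round-toward-zero semantics: add 2^k-1 to
// negative dividends before the arithmetic shift, and negate the result
// when the divisor is negative.
int32_t sdiv_pow2(int32_t x, unsigned k, bool NegativeDivisor) {
  int32_t Adjusted = x;
  if (x < 0)                          // "bgez x, label" in the pseudo-code
    Adjusted += (int32_t(1) << k) - 1;
  int32_t Res = Adjusted >> k;        // "sra res, x, k" (arithmetic shift)
  return NegativeDivisor ? -Res : Res;
}

int main() {
  for (int32_t x = -1000; x <= 1000; ++x) {
    assert(sdiv_pow2(x, 2, false) == x / 4);
    assert(sdiv_pow2(x, 3, true) == x / -8);
  }
}
```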
virtualSDValue | BuildSDIVPow2 (SDNode *N,constAPInt &Divisor,SelectionDAG &DAG,SmallVectorImpl<SDNode * > &Created)const |
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators. | |
virtualSDValue | BuildSREMPow2 (SDNode *N,constAPInt &Divisor,SelectionDAG &DAG,SmallVectorImpl<SDNode * > &Created)const |
Targets may override this function to provide custom SREM lowering for power-of-2 denominators. | |
virtualunsigned | combineRepeatedFPDivisors ()const |
Indicate whether this target prefers to combine FDIVs with the same divisor. | |
virtualSDValue | getSqrtEstimate (SDValue Operand,SelectionDAG &DAG, intEnabled, int &RefinementSteps,bool &UseOneConstNR,bool Reciprocal)const |
Hooks for building estimates in place of slower divisions and square roots. | |
SDValue | createSelectForFMINNUM_FMAXNUM (SDNode *Node,SelectionDAG &DAG)const |
Try to convert the fminnum/fmaxnum to a compare/select sequence. | |
virtualSDValue | getRecipEstimate (SDValue Operand,SelectionDAG &DAG, intEnabled, int &RefinementSteps)const |
Return a reciprocal estimate value for the input operand. | |
virtualSDValue | getSqrtInputTest (SDValue Operand,SelectionDAG &DAG,constDenormalMode &Mode)const |
Return a target-dependent comparison result if the input operand is suitable for use with a square root estimate calculation. | |
virtualSDValue | getSqrtResultForDenormInput (SDValue Operand,SelectionDAG &DAG)const |
Return a target-dependent result if the input operand is not suitable for use with a square root estimate calculation. | |
bool | expandMUL_LOHI (unsigned Opcode,EVT VT,constSDLoc &dl,SDValueLHS,SDValueRHS,SmallVectorImpl<SDValue > &Result,EVT HiLoVT,SelectionDAG &DAG,MulExpansionKind Kind,SDValue LL=SDValue(),SDValue LH=SDValue(),SDValue RL=SDValue(),SDValue RH=SDValue())const |
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively, each computing an n/2-bit part of the result. | |
bool | expandMUL (SDNode *N,SDValue &Lo,SDValue &Hi,EVT HiLoVT,SelectionDAG &DAG,MulExpansionKind Kind,SDValue LL=SDValue(),SDValue LH=SDValue(),SDValue RL=SDValue(),SDValue RH=SDValue())const |
Expand a MUL into two nodes. | |
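The n/2-bit decomposition these expansion helpers perform is the ordinary schoolbook split. As a self-contained illustration, the sketch below builds a full 64x64 -> 128-bit product out of 32-bit pieces; the function name and driver are hypothetical.

```cpp
#include <cassert>
#include <cstdint>

// Schoolbook expansion of a 64x64->128 multiply out of 32-bit halves.
void mul64_lohi(uint64_t a, uint64_t b, uint64_t &Lo, uint64_t &Hi) {
  uint64_t aLo = a & 0xffffffffu, aHi = a >> 32;
  uint64_t bLo = b & 0xffffffffu, bHi = b >> 32;

  uint64_t LoLo = aLo * bLo;  // partial products
  uint64_t LoHi = aLo * bHi;
  uint64_t HiLo = aHi * bLo;
  uint64_t HiHi = aHi * bHi;

  // Accumulate the cross terms plus the carry out of the low partial product.
  uint64_t Mid = (LoLo >> 32) + (LoHi & 0xffffffffu) + (HiLo & 0xffffffffu);
  Lo = (Mid << 32) | (LoLo & 0xffffffffu);
  Hi = HiHi + (LoHi >> 32) + (HiLo >> 32) + (Mid >> 32);
}

int main() {
  uint64_t Lo, Hi;
  mul64_lohi(0xdeadbeefcafebabeULL, 0x123456789abcdef0ULL, Lo, Hi);
  // The low half must match the wrapping 64-bit product.
  assert(Lo == 0xdeadbeefcafebabeULL * 0x123456789abcdef0ULL);
}
```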
bool | expandDIVREMByConstant (SDNode *N,SmallVectorImpl<SDValue > &Result,EVT HiLoVT,SelectionDAG &DAG,SDValue LL=SDValue(),SDValue LH=SDValue())const |
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arithmetic ops. | |
SDValue | expandFunnelShift (SDNode *N,SelectionDAG &DAG)const |
Expand funnel shift. | |
SDValue | expandROT (SDNode *N,bool AllowVectorOps,SelectionDAG &DAG)const |
Expand rotations. | |
void | expandShiftParts (SDNode *N,SDValue &Lo,SDValue &Hi,SelectionDAG &DAG)const |
Expand shift-by-parts. | |
bool | expandFP_TO_SINT (SDNode *N,SDValue &Result,SelectionDAG &DAG)const |
Expand float(f32) to SINT(i64) conversion. | |
bool | expandFP_TO_UINT (SDNode *N,SDValue &Result,SDValue &Chain,SelectionDAG &DAG)const |
Expand float to UINT conversion. | |
bool | expandUINT_TO_FP (SDNode *N,SDValue &Result,SDValue &Chain,SelectionDAG &DAG)const |
Expand UINT(i64) to double(f64) conversion. | |
SDValue | expandFMINNUM_FMAXNUM (SDNode *N,SelectionDAG &DAG)const |
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs. | |
SDValue | expandFMINIMUM_FMAXIMUM (SDNode *N,SelectionDAG &DAG)const |
Expand fminimum/fmaximum into multiple comparison with selects. | |
SDValue | expandFMINIMUMNUM_FMAXIMUMNUM (SDNode *N,SelectionDAG &DAG)const |
Expand fminimumnum/fmaximumnum into multiple comparison with selects. | |
SDValue | expandFP_TO_INT_SAT (SDNode *N,SelectionDAG &DAG)const |
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max. | |
SDValue | expandRoundInexactToOdd (EVT ResultVT,SDValueOp,constSDLoc &DL,SelectionDAG &DAG)const |
Truncate Op to ResultVT. | |
SDValue | expandFP_ROUND (SDNode *Node,SelectionDAG &DAG)const |
Expand round(fp) to fp conversion. | |
SDValue | expandIS_FPCLASS (EVT ResultVT,SDValueOp,FPClassTestTest,SDNodeFlags Flags,constSDLoc &DL,SelectionDAG &DAG)const |
Expand check for floating point class. | |
SDValue | expandCTPOP (SDNode *N,SelectionDAG &DAG)const |
Expand CTPOP nodes. | |
SDValue | expandVPCTPOP (SDNode *N,SelectionDAG &DAG)const |
Expand VP_CTPOP nodes. | |
SDValue | expandCTLZ (SDNode *N,SelectionDAG &DAG)const |
Expand CTLZ/CTLZ_ZERO_UNDEF nodes. | |
SDValue | expandVPCTLZ (SDNode *N,SelectionDAG &DAG)const |
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes. | |
SDValue | CTTZTableLookup (SDNode *N,SelectionDAG &DAG,constSDLoc &DL,EVT VT,SDValueOp,unsigned NumBitsPerElt)const |
Expand CTTZ via Table Lookup. | |
SDValue | expandCTTZ (SDNode *N,SelectionDAG &DAG)const |
Expand CTTZ/CTTZ_ZERO_UNDEF nodes. | |
SDValue | expandVPCTTZ (SDNode *N,SelectionDAG &DAG)const |
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes. | |
SDValue | expandVPCTTZElements (SDNode *N,SelectionDAG &DAG)const |
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes. | |
SDValue | expandVectorFindLastActive (SDNode *N,SelectionDAG &DAG)const |
Expand VECTOR_FIND_LAST_ACTIVE nodes. | |
SDValue | expandABS (SDNode *N,SelectionDAG &DAG,bool IsNegative=false)const |
Expand ABS nodes. | |
SDValue | expandABD (SDNode *N,SelectionDAG &DAG)const |
Expand ABDS/ABDU nodes. | |
SDValue | expandAVG (SDNode *N,SelectionDAG &DAG)const |
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes. | |
SDValue | expandBSWAP (SDNode *N,SelectionDAG &DAG)const |
Expand BSWAP nodes. | |
SDValue | expandVPBSWAP (SDNode *N,SelectionDAG &DAG)const |
Expand VP_BSWAP nodes. | |
SDValue | expandBITREVERSE (SDNode *N,SelectionDAG &DAG)const |
Expand BITREVERSE nodes. | |
SDValue | expandVPBITREVERSE (SDNode *N,SelectionDAG &DAG)const |
Expand VP_BITREVERSE nodes. | |
std::pair<SDValue,SDValue > | scalarizeVectorLoad (LoadSDNode *LD,SelectionDAG &DAG)const |
Turn load of vector type into a load of the individual elements. | |
SDValue | scalarizeVectorStore (StoreSDNode *ST,SelectionDAG &DAG)const |
std::pair<SDValue,SDValue > | expandUnalignedLoad (LoadSDNode *LD,SelectionDAG &DAG)const |
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors. | |
SDValue | expandUnalignedStore (StoreSDNode *ST,SelectionDAG &DAG)const |
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors. | |
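The shape of the unaligned-access expansion is easy to model in plain C++: read two half-width pieces and recombine them with a shift and an or. The sketch below assumes a little-endian layout and uses memcpy for the half-word accesses; it illustrates the recombination only, not the exact DAG nodes these helpers build.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Emulate an unaligned 32-bit load as two 16-bit half loads plus shift/or.
uint32_t loadUnaligned32(const unsigned char *P) {
  uint16_t Lo, Hi;
  std::memcpy(&Lo, P, sizeof(Lo));      // low half-word
  std::memcpy(&Hi, P + 2, sizeof(Hi));  // high half-word
  return uint32_t(Lo) | (uint32_t(Hi) << 16);
}

int main() {
  unsigned char Buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  assert(loadUnaligned32(Buf + 1) == 0x12345678u); // misaligned by one byte
}
```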
SDValue | IncrementMemoryAddress (SDValueAddr,SDValue Mask,constSDLoc &DL,EVT DataVT,SelectionDAG &DAG,bool IsCompressedMemory)const |
Increments memory addressAddr according to the type of the valueDataVT that should be stored. | |
SDValue | getVectorElementPointer (SelectionDAG &DAG,SDValue VecPtr,EVT VecVT,SDValueIndex)const |
Get a pointer to vector elementIdx located in memory for a vector of typeVecVT starting at a base address ofVecPtr . | |
SDValue | getVectorSubVecPointer (SelectionDAG &DAG,SDValue VecPtr,EVT VecVT,EVT SubVecVT,SDValueIndex)const |
Get a pointer to a sub-vector of typeSubVecVT at indexIdx located in memory for a vector of typeVecVT starting at a base address ofVecPtr . | |
SDValue | expandIntMINMAX (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US][MIN|MAX]. | |
SDValue | expandAddSubSat (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US][ADD|SUB]SAT. | |
SDValue | expandCMP (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US]CMP. | |
SDValue | expandShlSat (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US]SHLSAT. | |
SDValue | expandFixedPointMul (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[U|S]MULFIX[SAT]. | |
SDValue | expandFixedPointDiv (unsigned Opcode,constSDLoc &dl,SDValueLHS,SDValueRHS,unsigned Scale,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US]DIVFIX[SAT]. | |
void | expandUADDSUBO (SDNode *Node,SDValue &Result,SDValue &Overflow,SelectionDAG &DAG)const |
Method for building the DAG expansion of ISD::U(ADD|SUB)O. | |
void | expandSADDSUBO (SDNode *Node,SDValue &Result,SDValue &Overflow,SelectionDAG &DAG)const |
Method for building the DAG expansion of ISD::S(ADD|SUB)O. | |
bool | expandMULO (SDNode *Node,SDValue &Result,SDValue &Overflow,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::[US]MULO. | |
void | forceExpandMultiply (SelectionDAG &DAG,constSDLoc &dl,boolSigned,SDValue &Lo,SDValue &Hi,SDValueLHS,SDValueRHS,SDValue HiLHS=SDValue(),SDValue HiRHS=SDValue())const |
Calculate the product twice the width of LHS and RHS. | |
void | forceExpandWideMUL (SelectionDAG &DAG,constSDLoc &dl,boolSigned,constSDValueLHS,constSDValueRHS,SDValue &Lo,SDValue &Hi)const |
Calculate full product of LHS and RHS either via a libcall or through brute force expansion of the multiplication. | |
SDValue | expandVecReduce (SDNode *Node,SelectionDAG &DAG)const |
Expand a VECREDUCE_* into an explicit calculation. | |
SDValue | expandVecReduceSeq (SDNode *Node,SelectionDAG &DAG)const |
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation. | |
bool | expandREM (SDNode *Node,SDValue &Result,SelectionDAG &DAG)const |
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal. | |
SDValue | expandVectorSplice (SDNode *Node,SelectionDAG &DAG)const |
Method for building the DAG expansion ofISD::VECTOR_SPLICE. | |
SDValue | expandVECTOR_COMPRESS (SDNode *Node,SelectionDAG &DAG)const |
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily, advance store position, before re-loading the final vector. | |
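A scalar model of that compress sequence, with hypothetical names, looks like the following: active lanes are written to the next free position, and the remaining lanes keep a passthru value.

```cpp
#include <array>
#include <cassert>
#include <cstddef>

// Scalar model of the VECTOR_COMPRESS expansion: walk the lanes, store each
// element whose mask bit is set at the current position, and advance the
// store position only for active lanes.
template <std::size_t N>
std::array<int, N> compress(const std::array<int, N> &V,
                            const std::array<bool, N> &Mask, int Passthru) {
  std::array<int, N> Out;
  Out.fill(Passthru);
  std::size_t Pos = 0;
  for (std::size_t I = 0; I < N; ++I)
    if (Mask[I])
      Out[Pos++] = V[I];
  return Out;
}

int main() {
  std::array<int, 4> V{1, 2, 3, 4};
  std::array<bool, 4> M{true, false, true, false};
  auto R = compress(V, M, 0);
  assert(R[0] == 1 && R[1] == 3 && R[2] == 0 && R[3] == 0);
}
```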
bool | LegalizeSetCCCondCode (SelectionDAG &DAG,EVT VT,SDValue &LHS,SDValue &RHS,SDValue &CC,SDValue Mask,SDValue EVL,bool &NeedInvert,constSDLoc &dl,SDValue &Chain,bool IsSignaling=false)const |
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target. | |
virtualMachineBasicBlock * | EmitInstrWithCustomInserter (MachineInstr &MI,MachineBasicBlock *MBB)const |
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag. | |
virtual void | AdjustInstrPostInstrSelection (MachineInstr &MI,SDNode *Node)const |
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag. | |
virtualbool | useLoadStackGuardNode (constModule &M)const |
If this function returns true,SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector. | |
virtualSDValue | emitStackGuardXorFP (SelectionDAG &DAG,SDValue Val,constSDLoc &DL)const |
virtualSDValue | LowerToTLSEmulatedModel (constGlobalAddressSDNode *GA,SelectionDAG &DAG)const |
Lower TLS global address SDNode for target independent emulated TLS model. | |
virtualSDValue | expandIndirectJTBranch (constSDLoc &dl,SDValueValue,SDValueAddr, int JTI,SelectionDAG &DAG)const |
Expands target specific indirect branch for the case of JumpTable expansion. | |
SDValue | lowerCmpEqZeroToCtlzSrl (SDValueOp,SelectionDAG &DAG)const |
virtualbool | isXAndYEqZeroPreferableToXAndYEqY (ISD::CondCode,EVT)const |
SDValue | expandVectorNaryOpBySplitting (SDNode *Node,SelectionDAG &DAG)const |
Public Member Functions inherited from llvm::TargetLoweringBase
virtual void | markLibCallAttributes (MachineFunction *MF,unsignedCC,ArgListTy &Args)const |
TargetLoweringBase (constTargetMachine &TM) | |
NOTE: The TargetMachine owns TLOF. | |
TargetLoweringBase (constTargetLoweringBase &)=delete | |
TargetLoweringBase & | operator= (constTargetLoweringBase &)=delete |
virtual | ~TargetLoweringBase ()=default |
bool | isStrictFPEnabled ()const |
Return true if the target supports strict float operations. | |
constTargetMachine & | getTargetMachine ()const |
virtualbool | useSoftFloat ()const |
virtualMVT | getPointerTy (constDataLayout &DL,uint32_t AS=0)const |
Return the pointer type for the given address space, defaults to the pointer type from the data layout. | |
virtualMVT | getPointerMemTy (constDataLayout &DL,uint32_t AS=0)const |
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout. | |
MVT | getFrameIndexTy (constDataLayout &DL)const |
Return the type for frame index, which is determined by the alloca address space specified through the data layout. | |
MVT | getProgramPointerTy (constDataLayout &DL)const |
Return the type for code pointers, which is determined by the program address space specified through the data layout. | |
virtualMVT | getFenceOperandTy (constDataLayout &DL)const |
Return the type for operands of fence. | |
virtualMVT | getScalarShiftAmountTy (constDataLayout &,EVT)const |
Return the type to use for a scalar shift opcode, given the shifted amount type. | |
EVT | getShiftAmountTy (EVT LHSTy,constDataLayout &DL)const |
Returns the type for the shift amount of a shift opcode. | |
virtualLLVM_READONLYLLT | getPreferredShiftAmountTy (LLT ShiftValueTy)const |
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy. | |
virtualMVT | getVectorIdxTy (constDataLayout &DL)const |
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR. | |
virtualMVT | getVPExplicitVectorLengthTy ()const |
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB, etc. | |
virtualMachineMemOperand::Flags | getTargetMMOFlags (constInstruction &I)const |
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand flags to them. | |
virtualMachineMemOperand::Flags | getTargetMMOFlags (constMemSDNode &Node)const |
This callback is used to inspect load/store SDNode. | |
MachineMemOperand::Flags | getLoadMemOperandFlags (constLoadInst &LI,constDataLayout &DL,AssumptionCache *AC=nullptr,constTargetLibraryInfo *LibInfo=nullptr)const |
MachineMemOperand::Flags | getStoreMemOperandFlags (constStoreInst &SI,constDataLayout &DL)const |
MachineMemOperand::Flags | getAtomicMemOperandFlags (constInstruction &AI,constDataLayout &DL)const |
virtualbool | isSelectSupported (SelectSupportKind)const |
virtualbool | shouldExpandPartialReductionIntrinsic (constIntrinsicInst *I)const |
Return true if the @llvm.experimental.vector.partial.reduce.* intrinsic should be expanded using generic code in SelectionDAGBuilder. | |
virtualbool | shouldExpandGetActiveLaneMask (EVT VT,EVT OpVT)const |
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in SelectionDAGBuilder. | |
virtualbool | shouldExpandGetVectorLength (EVT CountVT,unsigned VF,bool IsScalable)const |
virtualbool | shouldExpandCttzElements (EVT VT)const |
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in SelectionDAGBuilder. | |
unsigned | getBitWidthForCttzElements (Type *RetTy,ElementCount EC,bool ZeroIsPoison,constConstantRange *VScaleRange)const |
Return the minimum number of bits required to hold the maximum possible number of trailing zero vector elements. | |
virtualbool | shouldExpandVectorMatch (EVT VT,unsigned SearchSize)const |
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ and search size ‘SearchSize’ using generic code in SelectionDAGBuilder. | |
virtualbool | shouldReassociateReduction (unsigned RedOpc,EVT VT)const |
virtualbool | reduceSelectOfFPConstantLoads (EVT CmpOpVT)const |
Return true if it is profitable to convert a select of FP constants into a constant pool load whose address depends on the select condition. | |
bool | hasMultipleConditionRegisters ()const |
Return true if multiple condition registers are available. | |
bool | hasExtractBitsInsn ()const |
Return true if the target has BitExtract instructions. | |
virtualTargetLoweringBase::LegalizeTypeAction | getPreferredVectorAction (MVT VT)const |
Return the preferred vector type legalization action. | |
virtualbool | softPromoteHalfType ()const |
virtualbool | useFPRegsForHalfType ()const |
virtualbool | shouldExpandBuildVectorWithShuffles (EVT,unsigned DefinedValues)const |
virtualbool | isIntDivCheap (EVT VT,AttributeList Attr)const |
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target. | |
virtualbool | hasStandaloneRem (EVT VT)const |
Return true if the target can handle a standalone remainder operation. | |
virtualbool | isFsqrtCheap (SDValueX,SelectionDAG &DAG)const |
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X). | |
int | getRecipEstimateSqrtEnabled (EVT VT,MachineFunction &MF)const |
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's attributes. | |
int | getRecipEstimateDivEnabled (EVT VT,MachineFunction &MF)const |
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attributes. | |
int | getSqrtRefinementSteps (EVT VT,MachineFunction &MF)const |
Return the refinement step count for a square root of the given type based on the function's attributes. | |
int | getDivRefinementSteps (EVT VT,MachineFunction &MF)const |
Return the refinement step count for a division of the given type based on the function's attributes. | |
bool | isSlowDivBypassed ()const |
Returns true if target has indicated at least one type should be bypassed. | |
constDenseMap<unsigned int,unsigned int > & | getBypassSlowDivWidths ()const |
Returns map of slow types for division or remainder with corresponding fast types. | |
virtualbool | isVScaleKnownToBeAPowerOfTwo ()const |
Return true only if vscale must be a power of two. | |
bool | isJumpExpensive ()const |
Return true if Flow Control is an expensive operation that should be avoided. | |
virtualCondMergingParams | getJumpConditionMergingParams (Instruction::BinaryOps,constValue *,constValue *)const |
bool | isPredictableSelectExpensive ()const |
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right. | |
virtualbool | fallBackToDAGISel (constInstruction &Inst)const |
virtualbool | isLoadBitCastBeneficial (EVT LoadVT,EVT BitcastVT,constSelectionDAG &DAG,constMachineMemOperand &MMO)const |
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On architectures that don't natively support some vector loads efficiently, casting the load to a smaller vector of larger types and loading is more efficient, however, this can be undone by optimizations in dag combiner. | |
virtualbool | isStoreBitCastBeneficial (EVT StoreVT,EVT BitcastVT,constSelectionDAG &DAG,constMachineMemOperand &MMO)const |
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x, (x*)) | |
virtualbool | storeOfVectorConstantIsCheap (bool IsZero,EVT MemVT,unsigned NumElem,unsigned AddrSpace)const |
Return true if it is expected to be cheaper to do a store of vector constant with the given size and type for the address space than to store the individual scalar element constants. | |
virtualbool | mergeStoresAfterLegalization (EVT MemVT)const |
Allow store merging for the specified type after legalization in addition to before legalization. | |
virtualbool | canMergeStoresTo (unsigned AS,EVT MemVT,constMachineFunction &MF)const |
Returns if it's reasonable to merge stores to MemVT size. | |
virtualbool | isCheapToSpeculateCttz (Type *Ty)const |
Return true if it is cheap to speculate a call to intrinsic cttz. | |
virtualbool | isCheapToSpeculateCtlz (Type *Ty)const |
Return true if it is cheap to speculate a call to intrinsic ctlz. | |
virtualbool | isCtlzFast ()const |
Return true if ctlz instruction is fast. | |
virtualbool | isCtpopFast (EVT VT)const |
Return true if ctpop instruction is fast. | |
virtualunsigned | getCustomCtpopCost (EVT VT,ISD::CondCodeCond)const |
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a custom CTPOP. | |
virtualbool | isEqualityCmpFoldedWithSignedCmp ()const |
Return true if instruction generated for equality comparison is folded with instruction generated for signed comparison. | |
virtualbool | preferZeroCompareBranch ()const |
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare. | |
virtualbool | isMultiStoresCheaperThanBitsMerge (EVT LTy,EVT HTy)const |
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values into multiple stores. | |
virtualbool | isMaskAndCmp0FoldingBeneficial (constInstruction &AndI)const |
Return if the target supports combining a chain like: | |
virtualbool | areTwoSDNodeTargetMMOFlagsMergeable (constMemSDNode &NodeX,constMemSDNode &NodeY)const |
Return true if it is valid to merge the TargetMMOFlags in two SDNodes. | |
virtualbool | convertSetCCLogicToBitwiseLogic (EVT VT)const |
Use bitwise logic to make pairs of compares more efficient. | |
virtualMVT | hasFastEqualityCompare (unsigned NumBits)const |
Return the preferred operand type if the target has a quick way to compare integer values of the given size. | |
virtualbool | hasAndNotCompare (SDValueY)const |
Return true if the target should transform: (X & Y) == Y -> (~X & Y) == 0; (X & Y) != Y -> (~X & Y) != 0. | |
virtualbool | hasAndNot (SDValueX)const |
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify select or other instructions. | |
virtualbool | hasBitTest (SDValueX,SDValueY)const |
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be used to prevent breaking the pattern, or creating it if it could be recognized. | |
virtualbool | shouldFoldMaskToVariableShiftPair (SDValueX)const |
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine canonical form); Shifts: x >> y << y. Return true if the variant with 2 variable shifts is preferred. | |
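The equivalence between the two forms mentioned above can be checked with a couple of one-line helpers (hypothetical names):

```cpp
#include <cassert>
#include <cstdint>

// Two equivalent ways of clearing the low 'y' bits: a mask built from a
// shifted all-ones constant, or a shift-right/shift-left pair.
uint32_t clearLowMask(uint32_t x, unsigned y)  { return x & (~0u << y); }
uint32_t clearLowShift(uint32_t x, unsigned y) { return (x >> y) << y; }

int main() {
  for (unsigned y = 0; y < 32; ++y)
    assert(clearLowMask(0xdeadbeefu, y) == clearLowShift(0xdeadbeefu, y));
}
```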
virtualbool | shouldFoldConstantShiftPairToMask (constSDNode *N,CombineLevel Level)const |
Return true if it is profitable to fold a pair of shifts into a mask. | |
virtualbool | shouldTransformSignedTruncationCheck (EVT XVT,unsigned KeptBits)const |
Should we transform the IR-optimal check for whether the given truncation down into KeptBits would be truncating or not: (add x, (1 << (KeptBits-1))) srccond (1 << KeptBits), into its more traditional form: ((x << C) a>> C) dstcond x? Return true if we should transform. | |
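Written out for KeptBits = 8 on a 32-bit value, the two checks compared by this hook look like the sketch below (assuming arithmetic right shift for the signed form; the helper names are hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// "Does x fit in 8 signed bits?" via the add-and-unsigned-compare form:
// (add x, 1 << (KeptBits-1)) u< (1 << KeptBits)
bool fitsViaAdd(int32_t x) {
  return uint32_t(x) + 128u < 256u;
}

// Same question via the shift-pair form: ((x << C) a>> C) == x, C = 32 - KeptBits.
bool fitsViaShifts(int32_t x) {
  int32_t Shifted = int32_t(uint32_t(x) << 24);
  return (Shifted >> 24) == x; // assumes arithmetic right shift
}

int main() {
  for (int32_t x = -1000; x <= 1000; ++x)
    assert(fitsViaAdd(x) == fitsViaShifts(x));
}
```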
virtualbool | shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd (SDValueX,ConstantSDNode *XC,ConstantSDNode *CC,SDValueY,unsigned OldShiftOpcode,unsigned NewShiftOpcode,SelectionDAG &DAG)const |
Given the pattern (X & (C l>>/<< Y)) ==/!= 0, return true if it should be transformed into: ((X <</l>> Y) & C) ==/!= 0. WARNING: if 'X' is a constant, the fold may deadlock! FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat() here because it can end up being not linked in. | |
virtualbool | optimizeFMulOrFDivAsShiftAddBitcast (SDNode *N,SDValue FPConst,SDValue IntPow2)const |
virtualunsigned | preferedOpcodeForCmpEqPiecesOfOperand (EVT VT,unsigned ShiftOpc,bool MayTransformRotate,constAPInt &ShiftOrRotateAmt,const std::optional<APInt > &AndMask)const |
virtualbool | preferIncOfAddToSubOfNot (EVT VT)const |
These two forms are equivalent: sub y, (xor x, -1) and add (add x, 1), y; the variant with two adds is IR-canonical. | |
virtualbool | preferABDSToABSWithNSW (EVT VT)const |
virtualbool | preferScalarizeSplat (SDNode *N)const |
virtualbool | preferSextInRegOfTruncate (EVT TruncVT,EVT VT,EVT ExtVT)const |
bool | enableExtLdPromotion ()const |
Return true if the target wants to use the optimization that turns ext(promotableInst1(...(promotableInstN(load)))) into promotedInst1(...(promotedInstN(ext(load)))). | |
virtualbool | canCombineStoreAndExtract (Type *VectorTy,Value *Idx,unsigned &Cost)const |
Return true if the target can combine store(extractelement VectorTy,Idx). | |
virtualbool | shallExtractConstSplatVectorElementToStore (Type *VectorTy,unsigned ElemSizeInBits,unsigned &Index)const |
Return true if the target shall perform extract vector element and store given that the vector is known to be splat of constant. | |
virtualbool | shouldSplatInsEltVarIndex (EVT)const |
Return true if inserting a scalar into a variable element of an undef vector is more efficiently handled by splatting the scalar instead. | |
virtualbool | enableAggressiveFMAFusion (EVT VT)const |
Return true if target always benefits from combining into FMA for a given value type. | |
virtualbool | enableAggressiveFMAFusion (LLT Ty)const |
Return true if target always benefits from combining into FMA for a given value type. | |
virtualEVT | getSetCCResultType (constDataLayout &DL,LLVMContext &Context,EVT VT)const |
Return the ValueType of the result of SETCC operations. | |
virtualMVT::SimpleValueType | getCmpLibcallReturnType ()const |
Return the ValueType for comparison libcalls. | |
BooleanContent | getBooleanContents (bool isVec,bool isFloat)const |
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in types wider than i1. | |
BooleanContent | getBooleanContents (EVTType)const |
SDValue | promoteTargetBoolean (SelectionDAG &DAG,SDValueBool,EVT ValVT)const |
Promote the given target boolean to a target boolean of the given type. | |
Sched::Preference | getSchedulingPreference ()const |
Return target scheduling preference. | |
virtualSched::Preference | getSchedulingPreference (SDNode *)const |
Some scheduler, e.g. hybrid, can switch to different scheduling heuristics for different nodes. | |
virtualconstTargetRegisterClass * | getRegClassFor (MVT VT,bool isDivergent=false)const |
Return the register class that should be used for the specified value type. | |
virtualbool | requiresUniformRegister (MachineFunction &MF,constValue *)const |
Allows target to decide about the register class of the specific value that is live outside the defining block. | |
virtualconstTargetRegisterClass * | getRepRegClassFor (MVT VT)const |
Return the 'representative' register class for the specified value type. | |
virtualuint8_t | getRepRegClassCostFor (MVT VT)const |
Return the cost of the 'representative' register class for the specified value type. | |
virtualShiftLegalizationStrategy | preferredShiftLegalizationStrategy (SelectionDAG &DAG,SDNode *N,unsigned ExpansionFactor)const |
bool | isTypeLegal (EVT VT)const |
Return true if the target has native support for the specified value type. | |
constValueTypeActionImpl & | getValueTypeActions ()const |
LegalizeKind | getTypeConversion (LLVMContext &Context,EVT VT)const |
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in order to type-legalize it. | |
LegalizeTypeAction | getTypeAction (LLVMContext &Context,EVT VT)const |
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we need to promote it to a larger type (return 'Promote'), or we need to expand it into multiple registers of smaller integer type (return 'Expand'). | |
LegalizeTypeAction | getTypeAction (MVT VT)const |
virtualEVT | getTypeToTransformTo (LLVMContext &Context,EVT VT)const |
For types supported by the target, this is an identity function. | |
EVT | getTypeToExpandTo (LLVMContext &Context,EVT VT)const |
For types supported by the target, this is an identity function. | |
unsigned | getVectorTypeBreakdown (LLVMContext &Context,EVT VT,EVT &IntermediateVT,unsigned &NumIntermediates,MVT &RegisterVT)const |
Vector types are broken down into some number of legal first class types. | |
virtualunsigned | getVectorTypeBreakdownForCallingConv (LLVMContext &Context,CallingConv::IDCC,EVT VT,EVT &IntermediateVT,unsigned &NumIntermediates,MVT &RegisterVT)const |
Certain targets such as MIPS require that some types such as vectors are always broken down into scalars in some contexts. | |
virtualbool | getTgtMemIntrinsic (IntrinsicInfo &,constCallInst &,MachineFunction &,unsigned)const |
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory). | |
virtualbool | isFPImmLegal (constAPFloat &,EVT,bool ForCodeSize=false)const |
Returns true if the target can instruction select the specified FP immediate natively. | |
virtualbool | isShuffleMaskLegal (ArrayRef< int >,EVT)const |
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks. | |
virtualbool | canOpTrap (unsignedOp,EVT VT)const |
Returns true if the operation can trap for the value type. | |
virtualbool | isVectorClearMaskLegal (ArrayRef< int >,EVT)const |
Similar to isShuffleMaskLegal. | |
virtualLegalizeAction | getCustomOperationAction (SDNode &Op)const |
How to legalize this custom operation? | |
LegalizeAction | getOperationAction (unsignedOp,EVT VT)const |
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
virtualbool | isSupportedFixedPointOperation (unsignedOp,EVT VT,unsigned Scale)const |
Custom method defined by each target to indicate if an operation which may require a scale is supported natively by the target. | |
LegalizeAction | getFixedPointOperationAction (unsignedOp,EVT VT,unsigned Scale)const |
Some fixed point operations may be natively supported by the target but only for specific scales. | |
LegalizeAction | getStrictFPOperationAction (unsignedOp,EVT VT)const |
bool | isOperationLegalOrCustom (unsignedOp,EVT VT,bool LegalOnly=false)const |
Return true if the specified operation is legal on this target or can be made legal with custom lowering. | |
bool | isOperationLegalOrPromote (unsignedOp,EVT VT,bool LegalOnly=false)const |
Return true if the specified operation is legal on this target or can be made legal using promotion. | |
bool | isOperationLegalOrCustomOrPromote (unsignedOp,EVT VT,bool LegalOnly=false)const |
Return true if the specified operation is legal on this target or can be made legal with custom lowering or using promotion. | |
bool | isOperationCustom (unsignedOp,EVT VT)const |
Return true if the operation uses custom lowering, regardless of whether the type is legal or not. | |
virtualbool | areJTsAllowed (constFunction *Fn)const |
Return true if lowering to a jump table is allowed. | |
bool | rangeFitsInWord (constAPInt &Low,constAPInt &High,constDataLayout &DL)const |
Check whether the range [Low,High] fits in a machine word. | |
virtualbool | isSuitableForJumpTable (constSwitchInst *SI,uint64_t NumCases,uint64_tRange,ProfileSummaryInfo *PSI,BlockFrequencyInfo *BFI)const |
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumCases cases spanning a Range of values. | |
virtualMVT | getPreferredSwitchConditionType (LLVMContext &Context,EVT ConditionVT)const |
Returns preferred type for switch condition. | |
bool | isSuitableForBitTests (unsigned NumDests,unsigned NumCmps,constAPInt &Low,constAPInt &High,constDataLayout &DL)const |
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests unique destinations, Low and High as its lowest and highest case values, and expects NumCmps case value comparisons. | |
bool | isOperationExpand (unsignedOp,EVT VT)const |
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering. | |
bool | isOperationLegal (unsignedOp,EVT VT)const |
Return true if the specified operation is legal on this target. | |
LegalizeAction | getLoadExtAction (unsigned ExtType,EVT ValVT,EVT MemVT)const |
Return how this load with extension should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isLoadExtLegal (unsigned ExtType,EVT ValVT,EVT MemVT)const |
Return true if the specified load with extension is legal on this target. | |
bool | isLoadExtLegalOrCustom (unsigned ExtType,EVT ValVT,EVT MemVT)const |
Return true if the specified load with extension is legal or custom on this target. | |
LegalizeAction | getAtomicLoadExtAction (unsigned ExtType,EVT ValVT,EVT MemVT)const |
Same as getLoadExtAction, but for atomic loads. | |
bool | isAtomicLoadExtLegal (unsigned ExtType,EVT ValVT,EVT MemVT)const |
Return true if the specified atomic load with extension is legal on this target. | |
LegalizeAction | getTruncStoreAction (EVT ValVT,EVT MemVT)const |
Return how this store with truncation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isTruncStoreLegal (EVT ValVT,EVT MemVT)const |
Return true if the specified store with truncation is legal on this target. | |
bool | isTruncStoreLegalOrCustom (EVT ValVT,EVT MemVT)const |
Return true if the specified store with truncation is legal or custom on this target. | |
virtualbool | canCombineTruncStore (EVT ValVT,EVT MemVT,bool LegalOnly)const |
LegalizeAction | getIndexedLoadAction (unsigned IdxMode,MVT VT)const |
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isIndexedLoadLegal (unsigned IdxMode,EVT VT)const |
Return true if the specified indexed load is legal on this target. | |
LegalizeAction | getIndexedStoreAction (unsigned IdxMode,MVT VT)const |
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isIndexedStoreLegal (unsigned IdxMode,EVT VT)const |
Return true if the specified indexed store is legal on this target. | |
LegalizeAction | getIndexedMaskedLoadAction (unsigned IdxMode,MVT VT)const |
Return how the indexed masked load should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isIndexedMaskedLoadLegal (unsigned IdxMode,EVT VT)const |
Return true if the specified indexed masked load is legal on this target. | |
LegalizeAction | getIndexedMaskedStoreAction (unsigned IdxMode,MVT VT)const |
Return how the indexed masked store should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isIndexedMaskedStoreLegal (unsigned IdxMode,EVT VT)const |
Return true if the specified indexed masked store is legal on this target. | |
virtualbool | shouldExtendGSIndex (EVT VT,EVT &EltTy)const |
Returns true if the index type for a masked gather/scatter requires extending. | |
virtualbool | shouldRemoveExtendFromGSIndex (SDValue Extend,EVT DataVT)const |
virtualbool | isLegalScaleForGatherScatter (uint64_t Scale,uint64_t ElemSize)const |
LegalizeAction | getCondCodeAction (ISD::CondCodeCC,MVT VT)const |
Return how the condition code should be treated: either it is legal, needs to be expanded to some other code sequence, or the target has a custom expander for it. | |
bool | isCondCodeLegal (ISD::CondCodeCC,MVT VT)const |
Return true if the specified condition code is legal for a comparison of the specified types on this target. | |
bool | isCondCodeLegalOrCustom (ISD::CondCodeCC,MVT VT)const |
Return true if the specified condition code is legal or custom for a comparison of the specified types on this target. | |
MVT | getTypeToPromoteTo (unsignedOp,MVT VT)const |
If the action for this operation is to promote, this method returns the ValueType to promote to. | |
virtualEVT | getAsmOperandValueType (constDataLayout &DL,Type *Ty,bool AllowUnknown=false)const |
EVT | getValueType (constDataLayout &DL,Type *Ty,bool AllowUnknown=false)const |
Return the EVT corresponding to this LLVM type. | |
EVT | getMemValueType (constDataLayout &DL,Type *Ty,bool AllowUnknown=false)const |
MVT | getSimpleValueType (constDataLayout &DL,Type *Ty,bool AllowUnknown=false)const |
Return the MVT corresponding to this LLVM type. See getValueType. | |
virtualAlign | getByValTypeAlignment (Type *Ty,constDataLayout &DL)const |
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parameter area. | |
MVT | getRegisterType (MVT VT)const |
Return the type of registers that this ValueType will eventually require. | |
MVT | getRegisterType (LLVMContext &Context,EVT VT)const |
Return the type of registers that this ValueType will eventually require. | |
virtualunsigned | getNumRegisters (LLVMContext &Context,EVT VT, std::optional<MVT > RegisterVT=std::nullopt)const |
Return the number of registers that this ValueType will eventually require. | |
virtualMVT | getRegisterTypeForCallingConv (LLVMContext &Context,CallingConv::IDCC,EVT VT)const |
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations. | |
virtualunsigned | getNumRegistersForCallingConv (LLVMContext &Context,CallingConv::IDCC,EVT VT)const |
Certain targets require unusual breakdowns of certain types. | |
virtualAlign | getABIAlignmentForCallingConv (Type *ArgTy,constDataLayout &DL)const |
Certain targets have context sensitive alignment requirements, where one type has the alignment requirement of another type. | |
virtualbool | ShouldShrinkFPConstant (EVT)const |
If true, then instruction selection should seek to shrink the FP constant of the specified type to a smaller type in order to save space and / or reduce runtime. | |
virtualbool | shouldReduceLoadWidth (SDNode *Load,ISD::LoadExtType ExtTy,EVT NewVT)const |
Return true if it is profitable to reduce a load to a smaller type. | |
virtualbool | shouldRemoveRedundantExtend (SDValueOp)const |
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant, and use x directly. | |
bool | isPaddedAtMostSignificantBitsWhenStored (EVT VT)const |
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to memory and the type size isn't equal to the store size. | |
bool | hasBigEndianPartOrdering (EVT VT,constDataLayout &DL)const |
When splitting a value of the specified type into parts, does the Lo or Hi part come first? This usually follows the endianness, except for ppcf128, where the Hi part always comes first. | |
bool | hasTargetDAGCombine (ISD::NodeType NT)const |
If true, the target has custom DAG combine transformations that it can perform for the specified node. | |
unsigned | getGatherAllAliasesMaxDepth ()const |
virtualunsigned | getVaListSizeInBits (constDataLayout &DL)const |
Returns the size of the platform's va_list object. | |
unsigned | getMaxStoresPerMemset (bool OptSize)const |
Get maximum # of store operations permitted for llvm.memset. | |
unsigned | getMaxStoresPerMemcpy (bool OptSize)const |
Get maximum # of store operations permitted for llvm.memcpy. | |
virtualunsigned | getMaxGluedStoresPerMemcpy ()const |
Get maximum # of store operations to be glued together. | |
unsigned | getMaxExpandSizeMemcmp (bool OptSize)const |
Get maximum # of load operations permitted for memcmp. | |
unsigned | getMaxStoresPerMemmove (bool OptSize)const |
Get maximum # of store operations permitted for llvm.memmove. | |
virtualbool | allowsMisalignedMemoryAccesses (EVT,unsigned AddrSpace=0,Align Alignment=Align(1),MachineMemOperand::Flags Flags=MachineMemOperand::MONone,unsigned *=nullptr)const |
Determine if the target supports unaligned memory accesses. | |
virtualbool | allowsMisalignedMemoryAccesses (LLT,unsigned AddrSpace=0,Align Alignment=Align(1),MachineMemOperand::Flags Flags=MachineMemOperand::MONone,unsigned *=nullptr)const |
LLT handling variant. | |
bool | allowsMemoryAccessForAlignment (LLVMContext &Context,constDataLayout &DL,EVT VT,unsigned AddrSpace=0,Align Alignment=Align(1),MachineMemOperand::Flags Flags=MachineMemOperand::MONone,unsigned *Fast=nullptr)const |
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access. | |
bool | allowsMemoryAccessForAlignment (LLVMContext &Context,constDataLayout &DL,EVT VT,constMachineMemOperand &MMO,unsigned *Fast=nullptr)const |
Return true if the memory access of this type is aligned or if the target allows this specific unaligned access for the givenMachineMemOperand. | |
virtualbool | allowsMemoryAccess (LLVMContext &Context,constDataLayout &DL,EVT VT,unsigned AddrSpace=0,Align Alignment=Align(1),MachineMemOperand::Flags Flags=MachineMemOperand::MONone,unsigned *Fast=nullptr)const |
Return true if the target supports a memory access of this type for the given address space and alignment. | |
bool | allowsMemoryAccess (LLVMContext &Context,constDataLayout &DL,EVT VT,constMachineMemOperand &MMO,unsigned *Fast=nullptr)const |
Return true if the target supports a memory access of this type for the givenMachineMemOperand. | |
bool | allowsMemoryAccess (LLVMContext &Context,constDataLayout &DL,LLT Ty,constMachineMemOperand &MMO,unsigned *Fast=nullptr)const |
LLT handling variant. | |
virtualEVT | getOptimalMemOpType (constMemOp &Op,constAttributeList &)const |
Returns the target specific optimal type for load and store operations as a result of memset, memcpy, and memmove lowering. | |
virtualLLT | getOptimalMemOpLLT (constMemOp &Op,constAttributeList &)const |
LLT returning variant. | |
virtualbool | isSafeMemOpType (MVT)const |
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline. | |
virtualunsigned | getMinimumJumpTableEntries ()const |
Return lower limit for number of blocks in a jump table. | |
unsigned | getMinimumJumpTableDensity (bool OptForSize)const |
Return lower limit of the density in a jump table. | |
unsigned | getMaximumJumpTableSize ()const |
Return upper limit for number of entries in a jump table. | |
virtualbool | isJumpTableRelative ()const |
Register | getStackPointerRegisterToSaveRestore ()const |
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore. | |
virtualRegister | getExceptionPointerRegister (constConstant *PersonalityFn)const |
If a physical register, this returns the register that receives the exception address on entry to an EH pad. | |
virtualRegister | getExceptionSelectorRegister (constConstant *PersonalityFn)const |
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad. | |
virtualbool | needsFixedCatchObjects ()const |
Align | getMinStackArgumentAlignment ()const |
Return the minimum stack alignment of an argument. | |
Align | getMinFunctionAlignment ()const |
Return the minimum function alignment. | |
Align | getPrefFunctionAlignment ()const |
Return the preferred function alignment. | |
virtualAlign | getPrefLoopAlignment (MachineLoop *ML=nullptr)const |
Return the preferred loop alignment. | |
virtualunsigned | getMaxPermittedBytesForAlignment (MachineBasicBlock *MBB)const |
Return the maximum amount of bytes allowed to be emitted when padding for alignment. | |
virtualbool | alignLoopsWithOptSize ()const |
Should loops be aligned even when the function is marked OptSize (but not MinSize). | |
virtualValue * | getIRStackGuard (IRBuilderBase &IRB)const |
If the target has a standard location for the stack protector guard, returns the address of that location. | |
virtual void | insertSSPDeclarations (Module &M)const |
Inserts necessary declarations for SSP (stack protection) purpose. | |
virtualValue * | getSDagStackGuard (constModule &M)const |
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nullptr. | |
virtualbool | useStackGuardXorFP ()const |
If this function returns true, stack protection checks should XOR the frame pointer (or whichever pointer is used to address locals) into the stack guard value before checking it. | |
virtualFunction * | getSSPStackGuardCheck (constModule &M)const |
If the target has a standard stack protection check function that performs validation and error handling, returns the function. | |
virtualValue * | getSafeStackPointerLocation (IRBuilderBase &IRB)const |
Returns the target-specific address of the unsafe stack pointer. | |
virtualbool | hasStackProbeSymbol (constMachineFunction &MF)const |
Returns the name of the symbol used to emit stack probes or the empty string if not applicable. | |
virtualbool | hasInlineStackProbe (constMachineFunction &MF)const |
virtualStringRef | getStackProbeSymbolName (constMachineFunction &MF)const |
virtualbool | isFreeAddrSpaceCast (unsigned SrcAS,unsigned DestAS)const |
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. | |
virtualbool | shouldAlignPointerArgs (CallInst *,unsigned &,Align &)const |
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is being passed. | |
virtual void | emitAtomicCmpXchgNoStoreLLBalance (IRBuilderBase &Builder)const |
virtualbool | shouldSignExtendTypeInLibCall (Type *Ty,bool IsSigned)const |
Returns true if arguments should be sign-extended in lib calls. | |
virtualbool | shouldExtendTypeInLibCall (EVTType)const |
Returns true if arguments should be extended in lib calls. | |
virtualAtomicExpansionKind | shouldExpandAtomicLoadInIR (LoadInst *LI)const |
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass. | |
virtualAtomicExpansionKind | shouldCastAtomicLoadInIR (LoadInst *LI)const |
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass. | |
virtualAtomicExpansionKind | shouldExpandAtomicStoreInIR (StoreInst *SI)const |
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into. | |
virtualAtomicExpansionKind | shouldCastAtomicStoreInIR (StoreInst *SI)const |
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into. | |
virtualAtomicExpansionKind | shouldExpandAtomicCmpXchgInIR (AtomicCmpXchgInst *AI)const |
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass. | |
virtualAtomicExpansionKind | shouldExpandAtomicRMWInIR (AtomicRMWInst *RMW)const |
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all. | |
virtualAtomicExpansionKind | shouldCastAtomicRMWIInIR (AtomicRMWInst *RMWI)const |
Returns how the given atomic atomicrmw should be cast by the IR-level AtomicExpand pass. | |
virtualLoadInst * | lowerIdempotentRMWIntoFencedLoad (AtomicRMWInst *RMWI)const |
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can be turned into a fence followed by an atomic load. | |
virtualISD::NodeType | getExtendForAtomicOps ()const |
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). | |
virtualISD::NodeType | getExtendForAtomicCmpSwapArg ()const |
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). | |
virtualbool | shouldNormalizeToSelectSequence (LLVMContext &Context,EVT VT)const |
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely that it saves us from materializing N0 and N1 in an integer register. | |
virtualbool | isProfitableToCombineMinNumMaxNum (EVT VT)const |
virtualbool | convertSelectOfConstantsToMath (EVT VT)const |
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops with the condition value. | |
virtualbool | decomposeMulByConstant (LLVMContext &Context,EVT VT,SDValueC)const |
Return true if it is profitable to transform an integer multiplication-by-constant into simpler operations like shifts and adds. | |
virtualbool | isMulAddWithConstProfitable (SDValue AddNode,SDValue ConstNode)const |
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). | |
virtualbool | shouldUseStrictFP_TO_INT (EVT FpVT,EVT IntVT,bool IsSigned)const |
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonicalizing the FP source value instead of converting all cases and then selecting based on value. | |
bool | isBeneficialToExpandPowI (int64_tExponent,bool OptForSize)const |
Return true if it is beneficial to expand an @llvm.powi. | |
virtualbool | getAddrModeArguments (constIntrinsicInst *,SmallVectorImpl<Value * > &,Type *&)const |
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the address. | |
virtualbool | isLegalAddressingMode (constDataLayout &DL,constAddrMode &AM,Type *Ty,unsigned AddrSpace,Instruction *I=nullptr)const |
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type. | |
virtualbool | addressingModeSupportsTLS (constGlobalValue &)const |
Returns true if the target's addressing mode can target thread local storage (TLS). | |
virtual int64_t | getPreferredLargeGEPBaseOffset (int64_t MinOffset, int64_t MaxOffset)const |
Return the preferred common base offset. | |
virtualbool | isLegalICmpImmediate (int64_t)const |
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register. | |
virtualbool | isLegalAddImmediate (int64_t)const |
Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register. | |
virtualbool | isLegalAddScalableImmediate (int64_t)const |
Return true if adding the specified scalable immediate is legal, that is the target has add instructions which can add a register with the immediate (multiplied by vscale) without having to materialize the immediate into a register. | |
virtualbool | isLegalStoreImmediate (int64_tValue)const |
Return true if the specified immediate is legal for the value input of a store instruction. | |
virtualType * | shouldConvertSplatType (ShuffleVectorInst *SVI)const |
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI's scalar type if the new type is more profitable. | |
virtualbool | shouldConvertPhiType (Type *From,Type *To)const |
Given a set in interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To', return true if the set should be converted to 'To'. | |
virtualbool | isCommutativeBinOp (unsigned Opcode)const |
Returns true if the opcode is a commutative binary operation. | |
virtualbool | isBinOp (unsigned Opcode)const |
Return true if the node is a math/logic binary operator. | |
virtualbool | isTruncateFree (Type *FromTy,Type *ToTy)const |
Return true if it's free to truncate a value of type FromTy to type ToTy. | |
virtualbool | allowTruncateForTailCall (Type *FromTy,Type *ToTy)const |
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position. | |
virtualbool | isTruncateFree (EVT FromVT,EVT ToVT)const |
virtualbool | isTruncateFree (LLT FromTy,LLT ToTy,LLVMContext &Ctx)const |
virtualbool | isTruncateFree (SDValue Val,EVT VT2)const |
Return true if truncating the specific node Val to type VT2 is free. | |
virtualbool | isProfitableToHoist (Instruction *I)const |
bool | isExtFree (constInstruction *I)const |
Return true if the extension represented byI is free. | |
bool | isExtLoad (constLoadInst *Load,constInstruction *Ext,constDataLayout &DL)const |
Return true if Load and Ext can form an ExtLoad. | |
virtualbool | isZExtFree (Type *FromTy,Type *ToTy)const |
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register. | |
virtualbool | isZExtFree (EVT FromTy,EVT ToTy)const |
virtualbool | isZExtFree (LLT FromTy,LLT ToTy,LLVMContext &Ctx)const |
virtualbool | isZExtFree (SDValue Val,EVT VT2)const |
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicitly zero-extended such as ARM ldrb / ldrh or because it's folded such as X86 zero-extending loads). | |
virtualbool | isSExtCheaperThanZExt (EVT FromTy,EVT ToTy)const |
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension. | |
virtualbool | signExtendConstant (constConstantInt *C)const |
Return true if this constant should be sign extended when promoting to a larger type. | |
virtualbool | optimizeExtendOrTruncateConversion (Instruction *I,Loop *L,constTargetTransformInfo &TTI)const |
Try to optimize extending or truncating conversion instructions (like zext, trunc, fptoui, uitofp) for the target. | |
virtualbool | hasPairedLoad (EVT,Align &)const |
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType next to each other in memory. | |
virtualbool | hasVectorBlend ()const |
Return true if the target has a vector blend instruction. | |
virtualunsigned | getMaxSupportedInterleaveFactor ()const |
Get the maximum supported factor for interleaved memory accesses. | |
virtualbool | lowerInterleavedLoad (LoadInst *LI,ArrayRef<ShuffleVectorInst * > Shuffles,ArrayRef<unsigned > Indices,unsigned Factor)const |
Lower an interleaved load to target specific intrinsics. | |
virtualbool | lowerInterleavedStore (StoreInst *SI,ShuffleVectorInst *SVI,unsigned Factor)const |
Lower an interleaved store to target specific intrinsics. | |
virtualbool | lowerDeinterleaveIntrinsicToLoad (LoadInst *LI,ArrayRef<Value * > DeinterleaveValues)const |
Lower a deinterleave intrinsic to a target specific load intrinsic. | |
virtualbool | lowerInterleaveIntrinsicToStore (StoreInst *SI,ArrayRef<Value * > InterleaveValues)const |
Lower an interleave intrinsic to a target specific store intrinsic. | |
virtualbool | isFPExtFree (EVT DestVT,EVT SrcVT)const |
Return true if an fpext operation is free (for instance, because single-precision floating-point numbers are implicitly extended to double-precision). | |
virtualbool | isFPExtFoldable (constMachineInstr &MI,unsigned Opcode,LLT DestTy,LLT SrcTy)const |
Return true if an fpext operation input to an Opcode operation is free (for instance, because half-precision floating-point numbers are implicitly extended to single precision) for an FMA instruction. | |
virtualbool | isFPExtFoldable (constSelectionDAG &DAG,unsigned Opcode,EVT DestVT,EVT SrcVT)const |
Return true if an fpext operation input to an Opcode operation is free (for instance, because half-precision floating-point numbers are implicitly extended to single precision) for an FMA instruction. | |
virtualbool | isVectorLoadExtDesirable (SDValue ExtVal)const |
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable. | |
virtualbool | isFNegFree (EVT VT)const |
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with a bitwise operation. | |
virtualbool | isFAbsFree (EVT VT)const |
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with a bitwise operation. | |
virtualbool | isFMAFasterThanFMulAndFAdd (constMachineFunction &MF,EVT)const |
Return true if an FMA operation is faster than a pair of fmul and fadd instructions. | |
virtualbool | isFMAFasterThanFMulAndFAdd (constMachineFunction &MF,LLT)const |
Return true if an FMA operation is faster than a pair of fmul and fadd instructions. | |
virtualbool | isFMAFasterThanFMulAndFAdd (constFunction &F,Type *)const |
IR version. | |
virtualbool | isFMADLegal (constMachineInstr &MI,LLT Ty)const |
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD. | |
virtualbool | isFMADLegal (constSelectionDAG &DAG,constSDNode *N)const |
Returns true if N can be combined with another node to form an ISD::FMAD. | |
virtualbool | generateFMAsInMachineCombiner (EVT VT,CodeGenOptLevel OptLevel)const |
virtualbool | isNarrowingProfitable (SDNode *N,EVT SrcVT,EVT DestVT)const |
Return true if it's profitable to narrow operations of type SrcVT to DestVT. | |
virtualbool | shouldFoldSelectWithIdentityConstant (unsigned BinOpcode,EVT VT)const |
Return true if pulling a binary operation into a select with an identity constant is profitable. | |
virtualbool | shouldConvertConstantLoadToIntImm (constAPInt &Imm,Type *Ty)const |
Return true if it is beneficial to convert a load of a constant to just the constant itself. | |
virtualbool | isExtractSubvectorCheap (EVT ResVT,EVT SrcVT,unsignedIndex)const |
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with this index. | |
virtualbool | shouldScalarizeBinop (SDValue VecOp)const |
Try to convert an extract element of a vector binary operation into an extract element followed by a scalar operation. | |
virtualbool | isExtractVecEltCheap (EVT VT,unsignedIndex)const |
Return true if extraction of a scalar element from the given vector type at the given index is cheap. | |
virtualbool | shouldFormOverflowOp (unsigned Opcode,EVT VT,bool MathUsed)const |
Try to convert math with an overflow comparison into the corresponding DAG node operation. | |
virtualbool | aggressivelyPreferBuildVectorSources (EVT VecVT)const |
virtualbool | shouldConsiderGEPOffsetSplit ()const |
virtualbool | shouldAvoidTransformToShift (EVT VT,unsigned Amount)const |
Return true if creating a shift of the type by the given amount is not profitable. | |
virtualbool | shouldFoldSelectWithSingleBitTest (EVT VT,constAPInt &AndMask)const |
virtualbool | shouldKeepZExtForFP16Conv ()const |
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conversion library function. | |
virtualbool | shouldConvertFpToSat (unsignedOp,EVT FPVT,EVT VT)const |
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) saturation patterns. | |
virtualbool | shouldExpandCmpUsingSelects (EVT VT)const |
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean types. | |
virtualbool | isComplexDeinterleavingSupported ()const |
Does this target support complex deinterleaving. | |
virtualbool | isComplexDeinterleavingOperationSupported (ComplexDeinterleavingOperationOperation,Type *Ty)const |
Does this target support complex deinterleaving with the given operation and type. | |
virtualValue * | createComplexDeinterleavingIR (IRBuilderBase &B,ComplexDeinterleavingOperation OperationType,ComplexDeinterleavingRotation Rotation,Value *InputA,Value *InputB,Value *Accumulator=nullptr)const |
Create the IR node for the given complex deinterleaving operation. | |
void | setLibcallName (RTLIB::Libcall Call,constchar *Name) |
Rename the default libcall routine name for the specified libcall. | |
void | setLibcallName (ArrayRef<RTLIB::Libcall > Calls,constchar *Name) |
constchar * | getLibcallName (RTLIB::Libcall Call)const |
Get the libcall routine name for the specified libcall. | |
void | setCmpLibcallCC (RTLIB::Libcall Call,ISD::CondCodeCC) |
Override the default CondCode to be used to test the result of the comparison libcall against zero. | |
ISD::CondCode | getCmpLibcallCC (RTLIB::Libcall Call)const |
Get the CondCode that's to be used to test the result of the comparison libcall against zero. | |
void | setLibcallCallingConv (RTLIB::Libcall Call,CallingConv::IDCC) |
Set the CallingConv that should be used for the specified libcall. | |
CallingConv::ID | getLibcallCallingConv (RTLIB::Libcall Call)const |
Get the CallingConv that should be used for the specified libcall. | |
virtual void | finalizeLowering (MachineFunction &MF)const |
Execute target specific actions to finalize target lowering. | |
virtualbool | shouldLocalize (constMachineInstr &MI,constTargetTransformInfo *TTI)const |
Check whether or not MI needs to be moved close to its uses. | |
int | InstructionOpcodeToISD (unsigned Opcode)const |
Get the ISD node that corresponds to the Instruction class opcode. | |
int | IntrinsicIDToISD (Intrinsic::IDID)const |
Get the ISD node that corresponds to the Intrinsic ID. | |
unsigned | getMaxAtomicSizeInBitsSupported ()const |
Returns the maximum atomic operation size (in bits) supported by the backend. | |
unsigned | getMaxDivRemBitWidthSupported ()const |
Returns the size in bits of the maximum div/rem the backend supports. | |
unsigned | getMaxLargeFPConvertBitWidthSupported ()const |
Returns the size in bits of the maximum large fp convert the backend supports. | |
unsigned | getMinCmpXchgSizeInBits ()const |
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports. | |
bool | supportsUnalignedAtomics ()const |
Whether the target supports unaligned atomic operations. | |
virtualbool | shouldInsertFencesForAtomic (constInstruction *I)const |
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic. | |
virtualbool | shouldInsertTrailingFenceForAtomicStore (constInstruction *I)const |
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering for this atomic. | |
virtualValue * | emitLoadLinked (IRBuilderBase &Builder,Type *ValueTy,Value *Addr,AtomicOrdering Ord)const |
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type. | |
virtualValue * | emitStoreConditional (IRBuilderBase &Builder,Value *Val,Value *Addr,AtomicOrdering Ord)const |
Perform a store-conditional operation to Addr. | |
virtualValue * | emitMaskedAtomicRMWIntrinsic (IRBuilderBase &Builder,AtomicRMWInst *AI,Value *AlignedAddr,Value *Incr,Value *Mask,Value *ShiftAmt,AtomicOrdering Ord)const |
Perform a masked atomicrmw using a target-specific intrinsic. | |
virtual void | emitExpandAtomicRMW (AtomicRMWInst *AI)const |
Perform an atomicrmw expansion in a target-specific way. | |
virtual void | emitExpandAtomicCmpXchg (AtomicCmpXchgInst *CI)const |
Perform a cmpxchg expansion using a target-specific method. | |
virtual void | emitBitTestAtomicRMWIntrinsic (AtomicRMWInst *AI)const |
Perform a bit test atomicrmw using a target-specific intrinsic. | |
virtual void | emitCmpArithAtomicRMWIntrinsic (AtomicRMWInst *AI)const |
Perform an atomicrmw whose result is only used by a comparison, using a target-specific intrinsic. | |
virtualValue * | emitMaskedAtomicCmpXchgIntrinsic (IRBuilderBase &Builder,AtomicCmpXchgInst *CI,Value *AlignedAddr,Value *CmpVal,Value *NewVal,Value *Mask,AtomicOrdering Ord)const |
Perform a masked cmpxchg using a target-specific intrinsic. | |
virtualMachineInstr * | EmitKCFICheck (MachineBasicBlock &MBB,MachineBasicBlock::instr_iterator &MBBI,constTargetInstrInfo *TII)const |
virtualInstruction * | emitLeadingFence (IRBuilderBase &Builder,Instruction *Inst,AtomicOrdering Ord)const |
Inserts in the IR a target-specific intrinsic specifying a fence. | |
virtualInstruction * | emitTrailingFence (IRBuilderBase &Builder,Instruction *Inst,AtomicOrdering Ord)const |
Public Attributes | |
constNVPTXTargetMachine * | nvTM |
Additional Inherited Members | |
enum | ConstraintType { C_Register,C_RegisterClass,C_Memory,C_Address, C_Immediate,C_Other,C_Unknown } |
enum | ConstraintWeight { CW_Invalid = -1,CW_Okay = 0,CW_Good = 1,CW_Better = 2, CW_Best = 3,CW_SpecificReg = CW_Okay,CW_Register = CW_Good,CW_Memory = CW_Better, CW_Constant = CW_Best,CW_Default = CW_Okay } |
using | AsmOperandInfoVector = std::vector<AsmOperandInfo > |
using | ConstraintPair = std::pair<StringRef,TargetLowering::ConstraintType > |
using | ConstraintGroup =SmallVector<ConstraintPair > |
enum | LegalizeAction : uint8_t { Legal,Promote,Expand,LibCall, Custom } |
This enum indicates whether operations are valid for a target, and if not, what action should be used to make them valid. | |
enum | LegalizeTypeAction : uint8_t { TypeLegal,TypePromoteInteger,TypeExpandInteger,TypeSoftenFloat, TypeExpandFloat,TypeScalarizeVector,TypeSplitVector,TypeWidenVector, TypePromoteFloat,TypeSoftPromoteHalf,TypeScalarizeScalableVector } |
This enum indicates whether a type is legal for a target, and if not, what action should be used to make it valid. | |
enum | BooleanContent {UndefinedBooleanContent,ZeroOrOneBooleanContent,ZeroOrNegativeOneBooleanContent } |
Enum that describes how the target represents true/false values. | |
enum | SelectSupportKind {ScalarValSelect,ScalarCondVectorVal,VectorMaskSelect } |
Enum that describes what type of support for selects the target has. | |
enum class | AtomicExpansionKind { None,CastToInteger,LLSC,LLOnly, CmpXChg,MaskedIntrinsic,BitTestIntrinsic,CmpArithIntrinsic, Expand,NotAtomic } |
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all. | |
enum class | MulExpansionKind {Always,OnlyLegalOrCustom } |
Enum that specifies when a multiplication should be expanded. | |
enum class | NegatibleCost {Cheaper = 0,Neutral = 1,Expensive = 2 } |
Enum that specifies when a float negation is beneficial. | |
enum | AndOrSETCCFoldKind : uint8_t {None = 0,AddAnd = 1,NotAnd = 2,ABS = 4 } |
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ...)). | |
enum | ReciprocalEstimate : int {Unspecified = -1,Disabled = 0,Enabled = 1 } |
Reciprocal estimate status values used by the functions below. | |
enum class | ShiftLegalizationStrategy {ExpandToParts,ExpandThroughStack,LowerToLibcall } |
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recursion depth - how many expansions are needed. | |
using | LegalizeKind = std::pair<LegalizeTypeAction,EVT > |
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it. | |
using | ArgListTy = std::vector<ArgListEntry > |
staticISD::NodeType | getExtendForContent (BooleanContentContent) |
void | initActions () |
Initialize all of the actions to default values. | |
Value * | getDefaultSafeStackPointerLocation (IRBuilderBase &IRB,bool UseTLS)const |
void | setBooleanContents (BooleanContent Ty) |
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type. | |
void | setBooleanContents (BooleanContent IntTy,BooleanContent FloatTy) |
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type. | |
void | setBooleanVectorContents (BooleanContent Ty) |
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type. | |
void | setSchedulingPreference (Sched::Preference Pref) |
Specify the target scheduling preference. | |
void | setMinimumJumpTableEntries (unsigned Val) |
Indicate the minimum number of blocks to generate jump tables. | |
void | setMaximumJumpTableSize (unsigned) |
Indicate the maximum number of entries in jump tables. | |
void | setStackPointerRegisterToSaveRestore (Register R) |
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore. | |
void | setHasMultipleConditionRegisters (bool hasManyRegs=true) |
Tells the code generator that the target has multiple (allocatable) condition registers that can be used to store the results of comparisons for use by selects and conditional branches. | |
void | setHasExtractBitsInsn (bool hasExtractInsn=true) |
Tells the code generator that the target has BitExtract instructions. | |
void | setJumpIsExpensive (bool isExpensive=true) |
Tells the code generator not to expand logic operations on comparison predicates into separate sequences that increase the amount of flow control. | |
void | addBypassSlowDiv (unsigned int SlowBitWidth,unsigned int FastBitWidth) |
Tells the code generator which bitwidths to bypass. | |
void | addRegisterClass (MVT VT,constTargetRegisterClass *RC) |
Add the specified register class as an available regclass for the specified value type. | |
virtual std::pair<constTargetRegisterClass *,uint8_t > | findRepresentativeClass (constTargetRegisterInfo *TRI,MVT VT)const |
Return the largest legal super-reg register class of the register class for the specified type and its associated "cost". | |
void | computeRegisterProperties (constTargetRegisterInfo *TRI) |
Once all of the register classes are added, this allows us to compute derived properties we expose. | |
void | setOperationAction (unsignedOp,MVT VT,LegalizeAction Action) |
Indicate that the specified operation does not work with the specified type and indicate what to do about it. | |
void | setOperationAction (ArrayRef<unsigned > Ops,MVT VT,LegalizeAction Action) |
void | setOperationAction (ArrayRef<unsigned > Ops,ArrayRef<MVT > VTs,LegalizeAction Action) |
void | setLoadExtAction (unsigned ExtType,MVT ValVT,MVT MemVT,LegalizeAction Action) |
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it. | |
void | setLoadExtAction (ArrayRef<unsigned > ExtTypes,MVT ValVT,MVT MemVT,LegalizeAction Action) |
void | setLoadExtAction (ArrayRef<unsigned > ExtTypes,MVT ValVT,ArrayRef<MVT > MemVTs,LegalizeAction Action) |
void | setAtomicLoadExtAction (unsigned ExtType,MVT ValVT,MVT MemVT,LegalizeAction Action) |
Let target indicate that an extending atomic load of the specified type is legal. | |
void | setAtomicLoadExtAction (ArrayRef<unsigned > ExtTypes,MVT ValVT,MVT MemVT,LegalizeAction Action) |
void | setAtomicLoadExtAction (ArrayRef<unsigned > ExtTypes,MVT ValVT,ArrayRef<MVT > MemVTs,LegalizeAction Action) |
void | setTruncStoreAction (MVT ValVT,MVT MemVT,LegalizeAction Action) |
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it. | |
void | setIndexedLoadAction (ArrayRef<unsigned > IdxModes,MVT VT,LegalizeAction Action) |
Indicate that the specified indexed load does or does not work with the specified type and indicate what to do about it. | |
void | setIndexedLoadAction (ArrayRef<unsigned > IdxModes,ArrayRef<MVT > VTs,LegalizeAction Action) |
void | setIndexedStoreAction (ArrayRef<unsigned > IdxModes,MVT VT,LegalizeAction Action) |
Indicate that the specified indexed store does or does not work with the specified type and indicate what to do about it. | |
void | setIndexedStoreAction (ArrayRef<unsigned > IdxModes,ArrayRef<MVT > VTs,LegalizeAction Action) |
void | setIndexedMaskedLoadAction (unsigned IdxMode,MVT VT,LegalizeAction Action) |
Indicate that the specified indexed masked load does or does not work with the specified type and indicate what to do about it. | |
void | setIndexedMaskedStoreAction (unsigned IdxMode,MVT VT,LegalizeAction Action) |
Indicate that the specified indexed masked store does or does not work with the specified type and indicate what to do about it. | |
void | setCondCodeAction (ArrayRef<ISD::CondCode > CCs,MVT VT,LegalizeAction Action) |
Indicate that the specified condition code is or isn't supported on the target and indicate what to do about it. | |
void | setCondCodeAction (ArrayRef<ISD::CondCode > CCs,ArrayRef<MVT > VTs,LegalizeAction Action) |
void | AddPromotedToType (unsigned Opc,MVT OrigVT,MVT DestVT) |
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/fp until it can find one that works. | |
void | setOperationPromotedToType (unsigned Opc,MVT OrigVT,MVT DestVT) |
Convenience method to set an operation to Promote and specify the type in a single call. | |
void | setOperationPromotedToType (ArrayRef<unsigned > Ops,MVT OrigVT,MVT DestVT) |
void | setTargetDAGCombine (ArrayRef<ISD::NodeType > NTs) |
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method. | |
void | setMinFunctionAlignment (Align Alignment) |
Set the target's minimum function alignment. | |
void | setPrefFunctionAlignment (Align Alignment) |
Set the target's preferred function alignment. | |
void | setPrefLoopAlignment (Align Alignment) |
Set the target's preferred loop alignment. | |
void | setMaxBytesForAlignment (unsigned MaxBytes) |
void | setMinStackArgumentAlignment (Align Alignment) |
Set the minimum stack alignment of an argument. | |
void | setMaxAtomicSizeInBitsSupported (unsigned SizeInBits) |
Set the maximum atomic operation size supported by the backend. | |
void | setMaxDivRemBitWidthSupported (unsigned SizeInBits) |
Set the size in bits of the maximum div/rem the backend supports. | |
void | setMaxLargeFPConvertBitWidthSupported (unsigned SizeInBits) |
Set the size in bits of the maximum fp convert the backend supports. | |
void | setMinCmpXchgSizeInBits (unsigned SizeInBits) |
Sets the minimum cmpxchg or ll/sc size supported by the backend. | |
void | setSupportsUnalignedAtomics (bool UnalignedSupported) |
Sets whether unaligned atomic operations are supported. | |
virtualbool | isExtFreeImpl (constInstruction *I)const |
Return true if the extension represented byI is free. | |
bool | isLegalRC (constTargetRegisterInfo &TRI,constTargetRegisterClass &RC)const |
Return true if the value types that can be represented by the specified register class are all legal. | |
MachineBasicBlock * | emitPatchPoint (MachineInstr &MI,MachineBasicBlock *MBB)const |
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter. | |
unsigned | GatherAllAliasesMaxDepth |
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more preferable chain. | |
unsigned | MaxStoresPerMemset |
Specify maximum number of store instructions per memset call. | |
unsigned | MaxStoresPerMemsetOptSize |
Likewise for functions with the OptSize attribute. | |
unsigned | MaxStoresPerMemcpy |
Specify maximum number of store instructions per memcpy call. | |
unsigned | MaxStoresPerMemcpyOptSize |
Likewise for functions with the OptSize attribute. | |
unsigned | MaxGluedStoresPerMemcpy = 0 |
Specify max number of store instructions to glue in inlined memcpy. | |
unsigned | MaxLoadsPerMemcmp |
Specify maximum number of load instructions per memcmp call. | |
unsigned | MaxLoadsPerMemcmpOptSize |
Likewise for functions with the OptSize attribute. | |
unsigned | MaxStoresPerMemmove |
Specify maximum number of store instructions per memmove call. | |
unsigned | MaxStoresPerMemmoveOptSize |
Likewise for functions with the OptSize attribute. | |
bool | PredictableSelectIsExpensive |
Tells the code generator that select is more expensive than a branch if the branch is usually predicted right. | |
bool | EnableExtLdPromotion |
bool | IsStrictFPEnabled |
Definition at line99 of fileNVPTXISelLowering.h.
explicit
Definition at line499 of fileNVPTXISelLowering.cpp.
Referencesllvm::ISD::ABS,llvm::ISD::ADD,llvm::TargetLoweringBase::addBypassSlowDiv(),llvm::ISD::ADDC,llvm::ISD::ADDE,llvm::TargetLoweringBase::AddPromotedToType(),llvm::TargetLoweringBase::addRegisterClass(),llvm::NVPTXSubtarget::allowFP16Math(),llvm::ISD::AND,llvm::ISD::BF16_TO_FP,llvm::ISD::BITCAST,llvm::ISD::BITREVERSE,llvm::ISD::BR_CC,llvm::ISD::BR_JT,llvm::ISD::BRIND,llvm::ISD::BSWAP,llvm::ISD::BUILD_VECTOR,llvm::TargetLoweringBase::computeRegisterProperties(),llvm::ISD::ConstantFP,llvm::ISD::CopyFromReg,llvm::ISD::CopyToReg,llvm::ISD::CTLZ,llvm::ISD::CTPOP,llvm::ISD::CTTZ,llvm::TargetLoweringBase::Custom,llvm::ISD::DEBUGTRAP,llvm::ISD::DYNAMIC_STACKALLOC,llvm::TargetLoweringBase::Expand,llvm::ISD::EXTLOAD,llvm::ISD::EXTRACT_VECTOR_ELT,llvm::ISD::FABS,llvm::ISD::FADD,llvm::ISD::FCEIL,llvm::ISD::FCOPYSIGN,llvm::ISD::FCOS,llvm::ISD::FDIV,llvm::ISD::FEXP2,llvm::ISD::FFLOOR,llvm::MVT::fixedlen_vector_valuetypes(),llvm::ISD::FLOG2,llvm::ISD::FMA,llvm::ISD::FMAXIMUM,llvm::ISD::FMAXNUM,llvm::ISD::FMAXNUM_IEEE,llvm::ISD::FMINIMUM,llvm::ISD::FMINNUM,llvm::ISD::FMINNUM_IEEE,llvm::ISD::FMUL,llvm::ISD::FNEARBYINT,llvm::ISD::FNEG,llvm::ISD::FP_EXTEND,llvm::ISD::FP_ROUND,llvm::ISD::FP_TO_SINT,llvm::ISD::FP_TO_UINT,llvm::ISD::FREM,llvm::ISD::FRINT,llvm::ISD::FROUND,llvm::ISD::FROUNDEVEN,llvm::ISD::FSHL,llvm::ISD::FSHR,llvm::ISD::FSIN,llvm::ISD::FSQRT,llvm::ISD::FSUB,llvm::ISD::FTRUNC,llvm::NVPTXSubtarget::getMinCmpXchgSizeInBits(),llvm::TargetLoweringBase::getOperationAction(),llvm::NVPTXSubtarget::getPTXVersion(),llvm::NVPTXSubtarget::getRegisterInfo(),llvm::NVPTXSubtarget::getSmVersion(),llvm::ISD::GlobalAddress,llvm::NVPTXSubtarget::hasBF16Math(),llvm::NVPTXSubtarget::hasHWROT32(),llvm::NVPTXSubtarget::hasNativeBF16Support(),llvm::ISD::INSERT_VECTOR_ELT,llvm::MVT::integer_valuetypes(),llvm::ISD::INTRINSIC_W_CHAIN,IsPTXVectorType(),llvm::IRSimilarity::Legal,llvm::ISD::LOAD,llvm::TargetLoweringBase::MaxStoresPerMemcpy,llvm::TargetLoweringBase::MaxStoresPerMemcpyOptSize,llvm::TargetLoweringBase::MaxStoresPerMemmove,llvm::TargetLoweringBase::MaxStoresPerMemmoveOptSize,llvm::TargetLoweringBase::MaxStoresPerMemset,llvm::TargetLoweringBase::MaxStoresPerMemsetOptSize,llvm::ISD::MUL,llvm::ISD::MULHS,llvm::ISD::MULHU,llvm::ISD::PARITY,llvm::TargetLoweringBase::Promote,llvm::ISD::READCYCLECOUNTER,llvm::ISD::READSTEADYCOUNTER,llvm::Sched::RegPressure,llvm::ISD::ROTL,llvm::ISD::ROTR,llvm::ISD::SADDO,llvm::ISD::SADDO_CARRY,llvm::ISD::SADDSAT,sched4reg,llvm::ISD::SDIV,llvm::ISD::SDIVREM,llvm::ISD::SELECT,llvm::ISD::SELECT_CC,llvm::TargetLoweringBase::setBooleanContents(),llvm::TargetLoweringBase::setBooleanVectorContents(),llvm::ISD::SETCC,llvm::TargetLoweringBase::setCondCodeAction(),llvm::ISD::SETEQ,llvm::ISD::SETGE,llvm::ISD::SETGT,llvm::TargetLoweringBase::setJumpIsExpensive(),llvm::ISD::SETLE,llvm::TargetLoweringBase::setLoadExtAction(),llvm::ISD::SETLT,llvm::TargetLoweringBase::setMaxAtomicSizeInBitsSupported(),llvm::TargetLoweringBase::setMaxDivRemBitWidthSupported(),llvm::TargetLoweringBase::setMinCmpXchgSizeInBits(),llvm::ISD::SETNE,llvm::TargetLoweringBase::setOperationAction(),llvm::TargetLoweringBase::setOperationPromotedToType(),llvm::TargetLoweringBase::setSchedulingPreference(),llvm::TargetLoweringBase::setTargetDAGCombine(),llvm::TargetLoweringBase::setTruncStoreAction(),llvm::ISD::SETUGE,llvm::ISD::SETUGT,llvm::ISD::SETULE,llvm::ISD::SETULT,llvm::ISD::SEXTLOAD,llvm::ISD::SHL,llvm::ISD::SHL_PARTS,llvm::ISD::SIGN_EXTEND_INREG,llvm::ISD::SINT_TO_FP,llvm::ISD::SMAX,llvm::ISD::SMIN,llvm::ISD::SMUL_LOHI,llvm::ISD::SMULO,llvm::Sched::Source,llvm::ISD::SRA,llvm::ISD::SRA_PARTS,llvm::ISD::SREM,llvm::ISD::SRL,llvm::ISD::SRL_PARTS,llvm::ISD::SSHLSAT,llvm::ISD::SSUBO,llvm::ISD::SSUBO_CARRY,llvm::ISD::SSUBSAT,llvm::ISD::STACKRESTORE,llvm::ISD::STACKSAVE,llvm::ISD::STORE,llvm::ISD::SUB,llvm::ISD::SUBC,llvm::ISD::SUBE,llvm::ISD::TRAP,llvm::ISD::UADDO,llvm::ISD::UADDO_CARRY,llvm::ISD::UADDSAT,llvm::ISD::UDIV,llvm::ISD::UDIVREM,llvm::ISD::UINT_TO_FP,llvm::ISD::UMAX,llvm::ISD::UMIN,llvm::ISD::UMUL_LOHI,llvm::ISD::UMULO,llvm::ISD::UREM,UseApproxLog2F32,llvm::ISD::USHLSAT,llvm::ISD::USUBO,llvm::ISD::USUBO_CARRY,llvm::ISD::USUBSAT,llvm::ISD::VAARG,llvm::ISD::VACOPY,llvm::ISD::VAEND,llvm::ISD::VASTART,llvm::ISD::VECTOR_SHUFFLE,llvm::ISD::VSELECT,llvm::TargetLoweringBase::ZeroOrNegativeOneBooleanContent, andllvm::ISD::ZEXTLOAD.
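The reference list above is dominated by addRegisterClass, setOperationAction, setLoadExtAction and related setup calls, which is the usual shape of a TargetLowering constructor. The fragment below is a hedged, illustrative-only sketch of that pattern; the MyTarget names, types and chosen actions are hypothetical, not NVPTX's actual configuration (see NVPTXISelLowering.cpp for that).

```cpp
// Illustrative sketch of typical TargetLowering constructor setup.
// All target-specific names here (MyTargetLowering, MySubtarget,
// MyTarget::GPR32RegClass) are placeholders.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM,
                                   const MySubtarget &STI)
    : TargetLowering(TM) {
  // Make i32 values live in the target's 32-bit register class.
  addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);

  // Ask the legalizer to call LowerOperation() for 64-bit shift parts.
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);

  // Expand operations the target has no native instruction for.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  // Sign-extending i1 loads are promoted to a wider load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);

  // Booleans are represented as 0/-1 in wider registers.
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  // Register target DAG combines for selected generic nodes.
  setTargetDAGCombine({ISD::ADD, ISD::MUL});

  // Derive register/type properties from the registered classes.
  computeRegisterProperties(STI.getRegisterInfo());
}
```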
inline, override, virtual
Reimplemented fromllvm::TargetLoweringBase.
Definition at line257 of fileNVPTXISelLowering.h.
bool NVPTXTargetLowering::allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const
Definition at line4460 of fileNVPTXISelLowering.cpp.
ReferencesF,llvm::MachineFunction::getFunction(),llvm::MachineFunction::getTarget(),llvm::TargetMachine::Options, andllvm::TargetOptions::UnsafeFPMath.
Referenced byallowFMA().
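The references above (TargetMachine::Options, TargetOptions::UnsafeFPMath, MachineFunction::getFunction) suggest a check that consults the global flag and then the per-function attribute. A minimal sketch of that style of query, not necessarily the verbatim implementation:

```cpp
// Hedged sketch: honor the global TargetOptions flag first, then fall back
// to the function-level "unsafe-fp-math" string attribute.
static bool allowUnsafeFPMathSketch(const MachineFunction &MF) {
  if (MF.getTarget().Options.UnsafeFPMath)
    return true;
  const Function &F = MF.getFunction();
  return F.getFnAttribute("unsafe-fp-math").getValueAsBool();
}
```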
inline, override, virtual
Indicate whether this target prefers to combine FDIVs with the same divisor.
If the transform should never be done, return zero. If the transform should be done, return the minimum number of divisor uses that must exist.
Reimplemented fromllvm::TargetLowering.
Definition at line223 of fileNVPTXISelLowering.h.
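Targets opt into this transform by overriding the hook to return a minimum divisor-use count. A minimal sketch of such an override (the threshold value is illustrative):

```cpp
// Rewrite x/d, y/d as x*(1/d), y*(1/d) only when the divisor has at least
// two uses; returning 0 would disable the transform entirely.
unsigned MyTargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}
```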
Return true if target always benefits from combining into FMA for a given value type.
This must typically return false on targets where FMA takes more cycles to execute than FADD.
Reimplemented fromllvm::TargetLoweringBase.
Definition at line238 of fileNVPTXISelLowering.h.
override, virtual
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
Reimplemented fromllvm::TargetLowering.
Definition at line4386 of fileNVPTXISelLowering.cpp.
Referencesllvm::TargetLowering::C_RegisterClass,llvm::TargetLowering::getConstraintType(), andllvm::StringRef::size().
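A common implementation pattern for this hook classifies single-character constraints and defers everything else to the generic TargetLowering handling. The letters below are illustrative, not NVPTX's actual constraint set:

```cpp
// Illustrative override: classify a couple of single-character constraints
// and defer the rest to the base class.
TargetLowering::ConstraintType
MyTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r': // general register operand
    case 'f': // floating-point register operand
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
```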
int NVPTXTargetLowering::getDivF32Level() const
Definition at line110 of fileNVPTXISelLowering.cpp.
Referencesllvm::TargetLoweringBase::getTargetMachine(),Options, andUsePrecDivF32.
Align NVPTXTargetLowering::getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
Definition at line1266 of fileNVPTXISelLowering.cpp.
ReferencesDL,F,llvm::getAlign(),getFunctionParamOptimizedAlign(), andIdx.
Referenced byLowerFormalArguments().
Align NVPTXTargetLowering::getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
Definition at line4300 of fileNVPTXISelLowering.cpp.
ReferencesDL,F,ForceMinByValParamAlign, andgetFunctionParamOptimizedAlign().
Referenced bygetPrototype(), andLowerCall().
Align NVPTXTargetLowering::getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space, we may want to increase their alignment in a way that ensures that we can effectively vectorize their loads & stores.
We can increase alignment only if the function has internal or has private linkage as for other linkage types callers may already rely on default alignment. To allow using 128-bit vectorized loads/stores, this function ensures that alignment is 16 or greater.
Definition at line4279 of fileNVPTXISelLowering.cpp.
Referencesassert(),DL,F, andllvm::isKernelFunction().
Referenced byadjustByValArgAlignment(),getFunctionArgumentAlignment(),getFunctionByValParamAlign(), andLowerReturn().
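A hedged sketch of the policy described above, on a hypothetical target: keep the ABI alignment whenever an external caller could already rely on it, and otherwise raise it to 16 bytes so 128-bit vectorized loads/stores of the parameter become possible. This mirrors the documented intent, not the verbatim NVPTX implementation.

```cpp
// Sketch only: raise .param alignment for locally visible functions.
Align MyTargetLowering::getFunctionParamOptimizedAlign(
    const Function *F, Type *ArgTy, const DataLayout &DL) const {
  const Align ABIAlign = DL.getABITypeAlign(ArgTy);
  // Unknown callee or externally visible linkage: keep the ABI alignment,
  // since existing callers may already depend on it.
  if (!F || !F->hasLocalLinkage())
    return ABIAlign;
  // Locally visible only: safe to advertise a larger alignment.
  return Align(std::max<uint64_t>(16, ABIAlign.value()));
}
```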
override, virtual
Return the entry encoding for a jump table in the current function.
The returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
Reimplemented fromllvm::TargetLowering.
Definition at line2766 of fileNVPTXISelLowering.cpp.
Referencesllvm::MachineJumpTableInfo::EK_Inline.
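The reference to MachineJumpTableInfo::EK_Inline indicates the override simply selects inline jump-table entries; the shape of such an override is just:

```cpp
// Jump-table entries are emitted inline in the function body rather than in
// a separate data section (minimal sketch of the override's shape).
unsigned MyTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
```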
inline, override, virtual
Reimplemented fromllvm::TargetLowering.
Definition at line234 of fileNVPTXISelLowering.h.
Definition at line4326 of fileNVPTXISelLowering.cpp.
ReferencesF,llvm::MCSymbol::getName(),llvm::TargetMachine::getSymbol(),llvm::TargetLoweringBase::getTargetMachine(), andIdx.
override, virtual
Return the preferred vector type legalization action.
Reimplemented fromllvm::TargetLoweringBase.
Definition at line1082 of fileNVPTXISelLowering.cpp.
Referencesllvm::TargetLoweringBase::getPreferredVectorAction(),llvm::MVT::getScalarType(),llvm::MVT::getVectorNumElements(),llvm::MVT::isScalableVector(), andllvm::TargetLoweringBase::TypeSplitVector.
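A common use of this hook, consistent with the references above, is to steer the type legalizer toward splitting certain vectors instead of applying the default action. The i1 predicate below is an assumption for illustration, not necessarily the exact NVPTX condition:

```cpp
// Hedged sketch: split multi-element i1 vectors rather than widening them,
// and defer to the generic policy for everything else. The exact predicate
// used by NVPTX lives in NVPTXISelLowering.cpp.
TargetLoweringBase::LegalizeTypeAction
MyTargetLowering::getPreferredVectorAction(MVT VT) const {
  if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
      VT.getScalarType() == MVT::i1)
    return TypeSplitVector;
  return TargetLoweringBase::getPreferredVectorAction(VT);
}
```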
std::string NVPTXTargetLowering::getPrototype(const DataLayout &DL, Type *retTy, const ArgListTy &Args, const SmallVectorImpl<ISD::OutputArg> &Outs, MaybeAlign retAlignment, std::optional<std::pair<unsigned, const APInt &>> VAInfo, const CallBase &CB, unsigned UniqueCallSite) const
Definition at line 1152 of file NVPTXISelLowering.cpp.
References assert(), llvm::ComputeValueVTs(), DL, llvm::AttributeList::FirstArgIndex, getFunctionByValParamAlign(), llvm::TargetLoweringBase::getPointerTy(), llvm::Type::getPrimitiveSizeInBits(), llvm::NVPTXSubtarget::getSmVersion(), llvm::Type::getTypeID(), llvm::TargetLoweringBase::getValueType(), if(), llvm::Type::isFloatingPointTy(), llvm::Type::isIntegerTy(), IsTypePassedAsArray(), llvm_unreachable, nvTM, llvm::promoteScalarArgumentSize(), llvm::shouldEmitPTXNoReturn(), llvm::SmallVectorBase< Size_T >::size(), llvm::size(), llvm::Align::value(), and llvm::Type::VoidTyID.
Referenced by LowerCall().
NVPTXTargetLowering::getRegForInlineAsmConstraint() [override, virtual]
Given a physical register constraint (e.g.
{edx}), return the register number and the register class for the register.
Given a register class constraint, like 'r', if this corresponds directly to an LLVM register class, return a register of 0 and the register class pointer.
This should only be used for C_Register constraints. On error, this returns a register number of 0 and a null register class pointer.
Reimplemented from llvm::TargetLowering.
Definition at line 4408 of file NVPTXISelLowering.cpp.
References llvm::TargetLowering::getRegForInlineAsmConstraint(), llvm::NVPTXSubtarget::getSmVersion(), llvm::report_fatal_error(), llvm::StringRef::size(), and TRI.
NVPTXTargetLowering::getScalarShiftAmountTy() [inline, override, virtual]
Return the type to use for a scalar shift opcode, given the shifted amount type.
Targets should return a legal type if the input type is legal. Targets can return a type that is too small if the input type is illegal.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 196 of file NVPTXISelLowering.h.
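A minimal sketch of such an inline override, assuming PTX shift amounts are modeled as 32-bit operands:

MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
  return MVT::i32; // shift amounts are i32 under this assumption
}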
NVPTXTargetLowering::getSetCCResultType() [inline, override, virtual]
Return the ValueType of the result of SETCC operations.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 153 of file NVPTXISelLowering.h.
References llvm::EVT::getVectorNumElements(), llvm::EVT::getVectorVT(), and llvm::EVT::isVector().
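Consistent with the EVT helpers referenced above, the override plausibly looks like this sketch: vector compares produce a vector of i1 with the same element count, scalar compares produce a single i1.

EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                       EVT VT) const override {
  if (VT.isVector())
    return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
  return MVT::i1; // scalar SETCC result
}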
NVPTXTargetLowering::getSqrtEstimate() [override, virtual]
Hooks for building estimates in place of slower divisions and square roots.
Return either a square root or its reciprocal estimate value for the input operand. Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or 'Enabled' as set by a potential default override attribute.
If RefinementSteps is 'Unspecified', the number of Newton-Raphson refinement iterations required to generate a sufficient (though not necessarily IEEE-754 compliant) estimate is returned in that parameter. The boolean UseOneConstNR output is used to select a Newton-Raphson algorithm implementation that uses either one or two constants. The boolean Reciprocal is used to select whether the estimate is for the square root of the input operand or the reciprocal of its square root. A target may choose to implement its own refinement within this function. If that's true, then return '0' as the number of RefinementSteps to avoid any further refinement of the estimate. An empty SDValue return means no estimate sequence can be created.
Reimplemented from llvm::TargetLowering.
Definition at line 1089 of file NVPTXISelLowering.cpp.
References DL, llvm::TargetLoweringBase::Enabled, llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getMachineFunction(), llvm::SelectionDAG::getNode(), llvm::SDValue::getValueType(), llvm::ISD::INTRINSIC_WO_CHAIN, llvm::TargetLoweringBase::Unspecified, useF32FTZ(), and usePrecSqrtF32().
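A hedged sketch of the general pattern for an f32 operand follows. The NVVM approx-sqrt intrinsic IDs are assumptions used for illustration; the real implementation also consults usePrecSqrtF32() and useF32FTZ() to choose between precise, approximate, and flush-to-zero variants.

SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                        int &RefinementSteps, bool &UseOneConstNR,
                        bool Reciprocal) const override {
  if (Operand.getValueType() != MVT::f32)
    return SDValue();              // only the f32 case is sketched here
  RefinementSteps = 0;             // the approx instruction needs no refinement
  unsigned IID = Reciprocal ? Intrinsic::nvvm_rsqrt_approx_f  // assumed IDs
                            : Intrinsic::nvvm_sqrt_approx_f;
  SDLoc DL(Operand);
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::f32,
                     DAG.getConstant(IID, DL, MVT::i32), Operand);
}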
NVPTXTargetLowering::getTargetNodeName() [override, virtual]
This method returns the name of a target specific DAG node.
Reimplemented from llvm::TargetLowering.
Definition at line 1003 of file NVPTXISelLowering.cpp.
Referencesllvm::NVPTXISD::BFE,llvm::NVPTXISD::BFI,llvm::NVPTXISD::BrxEnd,llvm::NVPTXISD::BrxItem,llvm::NVPTXISD::BrxStart,llvm::NVPTXISD::CALL,llvm::NVPTXISD::CallArg,llvm::NVPTXISD::CallArgBegin,llvm::NVPTXISD::CallArgEnd,llvm::NVPTXISD::CallPrototype,llvm::NVPTXISD::CallSeqBegin,llvm::NVPTXISD::CallSeqEnd,llvm::NVPTXISD::CallSymbol,llvm::NVPTXISD::CallVal,llvm::NVPTXISD::CallVoid,llvm::NVPTXISD::DeclareParam,llvm::NVPTXISD::DeclareRet,llvm::NVPTXISD::DeclareRetParam,llvm::NVPTXISD::DeclareScalarParam,llvm::NVPTXISD::DeclareScalarRet,llvm::NVPTXISD::Dummy,llvm::NVPTXISD::DYNAMIC_STACKALLOC,llvm::NVPTXISD::FCOPYSIGN,llvm::NVPTXISD::FIRST_NUMBER,llvm::NVPTXISD::FSHL_CLAMP,llvm::NVPTXISD::FSHR_CLAMP,llvm::NVPTXISD::LastCallArg,llvm::NVPTXISD::LDUV2,llvm::NVPTXISD::LDUV4,llvm::NVPTXISD::LOAD_PARAM,llvm::NVPTXISD::LoadParam,llvm::NVPTXISD::LoadParamV2,llvm::NVPTXISD::LoadParamV4,llvm::NVPTXISD::LoadV2,llvm::NVPTXISD::LoadV4,MAKE_CASE,llvm::NVPTXISD::MoveParam,llvm::NVPTXISD::MUL_WIDE_SIGNED,llvm::NVPTXISD::MUL_WIDE_UNSIGNED,llvm::NVPTXISD::PrintCall,llvm::NVPTXISD::PrintCallUni,llvm::NVPTXISD::PrintConvergentCall,llvm::NVPTXISD::PrintConvergentCallUni,llvm::NVPTXISD::PRMT,llvm::NVPTXISD::Prototype,llvm::NVPTXISD::ProxyReg,llvm::NVPTXISD::PseudoUseParam,llvm::NVPTXISD::RET_GLUE,llvm::NVPTXISD::RETURN,llvm::NVPTXISD::SETP_BF16X2,llvm::NVPTXISD::SETP_F16X2,llvm::NVPTXISD::STACKRESTORE,llvm::NVPTXISD::STACKSAVE,llvm::NVPTXISD::StoreParam,llvm::NVPTXISD::StoreParamS32,llvm::NVPTXISD::StoreParamU32,llvm::NVPTXISD::StoreParamV2,llvm::NVPTXISD::StoreParamV4,llvm::NVPTXISD::StoreRetval,llvm::NVPTXISD::StoreRetvalV2,llvm::NVPTXISD::StoreRetvalV4,llvm::NVPTXISD::StoreV2,llvm::NVPTXISD::StoreV4, andllvm::NVPTXISD::Wrapper.
NVPTXTargetLowering::getTgtMemIntrinsic() [override, virtual]
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory).
If this is the case, it returns true and stores the intrinsic information into the IntrinsicInfo that was passed to the function.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 3439 of file NVPTXISelLowering.cpp.
References DL, llvm::TargetLoweringBase::getPointerTy(), llvm::TargetLoweringBase::getValueType(), I, Info, llvm::ISD::INTRINSIC_VOID, llvm::ISD::INTRINSIC_W_CHAIN, llvm::MachineMemOperand::MOLoad, and llvm::MachineMemOperand::MOStore.
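The recipe is to fill in IntrinsicInfo for every intrinsic that touches memory. The sketch below handles a single ldu.global flavor purely for illustration (the shipped switch covers many NVVM load/store/atomic intrinsics), so treat the chosen case and field values as assumptions.

bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                        MachineFunction &MF,
                        unsigned Intrinsic) const override {
  switch (Intrinsic) {
  case Intrinsic::nvvm_ldu_global_f:          // illustrative single case
    Info.opc = ISD::INTRINSIC_W_CHAIN;        // value + chain result
    Info.memVT = getValueType(MF.getDataLayout(), I.getType());
    Info.ptrVal = I.getArgOperand(0);         // pointer being read
    Info.offset = 0;
    Info.flags = MachineMemOperand::MOLoad;   // read-only access
    return true;
  default:
    return false;
  }
}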
NVPTXTargetLowering::isCheapToSpeculateCtlz() [override, virtual]
Return true if it is cheap to speculate a call to intrinsic ctlz.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 244 of file NVPTXISelLowering.h.
NVPTXTargetLowering::isFMAFasterThanFMulAndFAdd() [inline, override, virtual]
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
fmuladd intrinsics will be expanded to FMAs when this method returns true, otherwise fmuladd is expanded to fmul + fadd.
NOTE: This may be called before legalization on types for which FMAs are not legal, but should return true if those types will eventually legalize to types that support FMAs. After legalization, it will only be called on types that support FMAs (via Legal or Custom actions).
Targets that care about soft float support should return false when soft float code is being generated (i.e. use-soft-float).
Reimplemented from llvm::TargetLoweringBase.
Definition at line 228 of file NVPTXISelLowering.h.
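Since PTX exposes fused multiply-add for its floating-point types, the inline override plausibly amounts to the following sketch (the precise set of types is an assumption):

bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                EVT VT) const override {
  // fma.rn exists for these types, so fusing is at least as fast as mul+add.
  return VT == MVT::f16 || VT == MVT::f32 || VT == MVT::f64;
}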
NVPTXTargetLowering::isLegalAddressingMode() [override, virtual]
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
Used to guide target specific optimizations, like loop strength reduction (LoopStrengthReduce.cpp) and memory optimization for address mode (CodeGenPrepare.cpp)
Reimplemented from llvm::TargetLoweringBase.
Definition at line 4345 of file NVPTXISelLowering.cpp.
References llvm::TargetLoweringBase::AddrMode::BaseGV, llvm::TargetLoweringBase::AddrMode::BaseOffs, llvm::TargetLoweringBase::AddrMode::HasBaseReg, llvm::APInt::isSignedIntN(), and llvm::TargetLoweringBase::AddrMode::Scale.
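The AddrMode fields referenced above suggest a check of roughly this shape; it is a hedged sketch of a PTX-style rule (symbol-only, or base register plus a signed 32-bit immediate, never a scaled index), not the exact shipped predicate.

bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS, Instruction *I) const override {
  if (AM.BaseGV) // symbol-based address: no extra components allowed
    return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
  if (!APInt(64, AM.BaseOffs, /*isSigned=*/true).isSignedIntN(32))
    return false; // immediate offset must fit in a signed 32-bit field
  return AM.Scale == 0; // PTX has no reg*scale addressing
}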
NVPTXTargetLowering::isTruncateFree() [inline, override, virtual]
Return true if it's free to truncate a value of type FromTy to type ToTy.
For example, on x86 it is free to truncate an i32 value in register EAX to i16 by referencing its sub-register AX. Targets must return false when FromTy <= ToTy.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 145 of file NVPTXISelLowering.h.
References llvm::Type::getPrimitiveSizeInBits(), and llvm::Type::isIntegerTy().
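Given the Type queries referenced above, the inline body is presumably close to this sketch; the specific 64-to-32-bit test is an assumption about which truncation PTX models as a free sub-register read.

bool isTruncateFree(Type *SrcTy, Type *DstTy) const override {
  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  // Truncating a 64-bit value to its low 32 bits is a free register rename.
  return SrcTy->getPrimitiveSizeInBits() == 64 &&
         DstTy->getPrimitiveSizeInBits() == 32;
}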
NVPTXTargetLowering::LowerAsmOperandForConstraint() [override, virtual]
Lower the specified operand into the Ops vector.
If it is invalid, don't add anything to Ops.
Reimplemented from llvm::TargetLowering.
Definition at line 3426 of file NVPTXISelLowering.cpp.
References llvm::TargetLowering::LowerAsmOperandForConstraint(), and llvm::StringRef::size().
NVPTXTargetLowering::LowerCall() [override, virtual]
This hook must be implemented to lower calls into the specified DAG.
The outgoing arguments to the call are described by the Outs array, and the values to be returned by the call are described by the Ins array. The implementation should fill in the InVals array with legal-type return values from the call, and return the resulting token chain value.
Reimplemented from llvm::TargetLowering.
Definition at line 1411 of file NVPTXISelLowering.cpp.
Referencesllvm::ISD::ADD,llvm::Function::addFnAttr(),llvm::alignTo(),llvm::ISD::ANY_EXTEND,llvm::TargetLowering::CallLoweringInfo::Args,assert(),llvm::NVPTXISD::CallArg,llvm::NVPTXISD::CallArgBegin,llvm::NVPTXISD::CallArgEnd,llvm::TargetLowering::CallLoweringInfo::Callee,llvm::NVPTXISD::CallPrototype,llvm::NVPTXISD::CallVoid,llvm::TargetLowering::CallLoweringInfo::CB,llvm::TargetLowering::CallLoweringInfo::Chain,llvm::SmallVectorImpl< T >::clear(),llvm::commonAlignment(),ComputePTXValueVTs(),llvm::ComputeValueVTs(),llvm::MachineRegisterInfo::createVirtualRegister(),llvm::TargetLowering::CallLoweringInfo::DAG,llvm::StringRef::data(),llvm::NVPTXISD::DeclareParam,llvm::NVPTXISD::DeclareRet,llvm::NVPTXISD::DeclareRetParam,llvm::NVPTXISD::DeclareScalarParam,llvm::TargetLowering::CallLoweringInfo::DL,DL,llvm::SmallVectorBase< Size_T >::empty(),llvm::TargetLowering::CallLoweringInfo::getArgs(),llvm::CallBase::getCalledFunction(),llvm::SelectionDAG::getCALLSEQ_END(),llvm::SelectionDAG::getCALLSEQ_START(),llvm::SelectionDAG::getConstant(),llvm::SDNode::getConstantOperandAPInt(),llvm::SelectionDAG::getContext(),llvm::SelectionDAG::getCopyFromReg(),llvm::SelectionDAG::getCopyToReg(),llvm::SelectionDAG::getDataLayout(),llvm::SelectionDAG::getEntryNode(),getFunctionByValParamAlign(),llvm::SelectionDAG::getLoad(),llvm::SelectionDAG::getMachineFunction(),llvm::NVPTXSubtarget::getMaxRequiredAlignment(),llvm::SelectionDAG::getMemIntrinsicNode(),llvm::SDValue::getNode(),llvm::SelectionDAG::getNode(),llvm::SDValue::getOpcode(),llvm::SDValue::getOperand(),llvm::TargetLoweringBase::getPointerTy(),getPrototype(),llvm::NVPTXSubtarget::getPTXVersion(),llvm::TargetLoweringBase::getRegClassFor(),llvm::MachineFunction::getRegInfo(),llvm::EVT::getSimpleVT(),llvm::EVT::getSizeInBits(),llvm::NVPTXSubtarget::getSmVersion(),llvm::NVPTXTargetMachine::getStrPool(),llvm::SelectionDAG::getSymbolFunctionGlobalAddress(),llvm::SelectionDAG::getTargetExternalSymbol(),llvm::SelectionDAG::getTargetLoweringInfo(),llvm::EVT::getTypeForEVT(),llvm::SDValue::getValue(),llvm::SDValue::getValueType(),llvm::SDNode::getVTList(),llvm::SelectionDAG::getVTList(),GlobalUniqueCallSite,llvm::TargetLowering::CallLoweringInfo::Ins,llvm::TargetLowering::CallLoweringInfo::IsConvergent,llvm::EVT::isFloatingPoint(),llvm::isIndirectCall(),llvm::EVT::isInteger(),llvm::Type::isIntegerTy(),llvm::TargetLowering::CallLoweringInfo::IsTailCall,IsTypePassedAsArray(),llvm::TargetLowering::CallLoweringInfo::IsVarArg,llvm::NVPTXISD::LastCallArg,llvm_unreachable,llvm::NVPTXISD::LoadParam,llvm::NVPTXISD::LoadParamV2,llvm::NVPTXISD::LoadParamV4,LowerUnalignedLoadRetParam(),LowerUnalignedStoreParam(),llvm::MachineMemOperand::MOLoad,llvm::SelectionDAG::MorphNodeTo(),llvm::MachineMemOperand::MOStore,llvm::TargetLowering::CallLoweringInfo::NumFixedArgs,nvTM,llvm::TargetLowering::CallLoweringInfo::Outs,llvm::TargetLowering::CallLoweringInfo::OutVals,llvm::NVPTXISD::PrintCall,llvm::NVPTXISD::PrintCallUni,llvm::NVPTXISD::PrintConvergentCall,llvm::NVPTXISD::PrintConvergentCallUni,llvm::promoteScalarArgumentSize(),PromoteScalarIntegerPTX(),llvm::NVPTXISD::Prototype,llvm::NVPTXISD::ProxyReg,llvm::SmallVectorTemplateBase< T, bool >::push_back(),PVF_FIRST,PVF_LAST,PVF_SCALAR,llvm::SelectionDAG::RemoveDeadNode(),llvm::SelectionDAG::ReplaceAllUsesWith(),llvm::report_fatal_error(),llvm::SmallVectorImpl< T >::resize(),llvm::TargetLowering::CallLoweringInfo::RetTy,RetTy,llvm::UniqueStringSaver::save(),shouldConvertToIndirectCall(),llvm::ISD::SIGN_EXTEND,llvm::SmallVectorBase< Size_T 
>::size(),llvm::NVPTXISD::StoreParam,llvm::NVPTXISD::StoreParamV2,llvm::NVPTXISD::StoreParamV4,llvm::ISD::TRUNCATE,llvm::Align::value(),VectorizePTXValueVTs(), andllvm::ISD::ZERO_EXTEND.
SDValue NVPTXTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
Definition at line 2007 of file NVPTXISelLowering.cpp.
References llvm::LLVMContext::diagnose(), DL, llvm::NVPTXISD::DYNAMIC_STACKALLOC, llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getContext(), getDebugLoc(), llvm::MachineFunction::getFunction(), llvm::SelectionDAG::getMachineFunction(), llvm::SelectionDAG::getMergeValues(), llvm::SelectionDAG::getNode(), llvm::NVPTXSubtarget::getPTXVersion(), llvm::NVPTXSubtarget::getSmVersion(), llvm::SelectionDAG::getTargetConstant(), llvm::SelectionDAG::getZExtOrTrunc(), llvm::NVPTXTargetMachine::is64Bit(), nvTM, and Size.
Referenced by LowerOperation().
NVPTXTargetLowering::LowerFormalArguments() [override, virtual]
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
The implementation should fill in the InVals array with legal-type argument values, and return the resulting token chain value.
Reimplemented from llvm::TargetLowering.
Definition at line 3079 of file NVPTXISelLowering.cpp.
Referencesllvm::ISD::ADD,llvm::NVPTXAS::ADDRESS_SPACE_PARAM,assert(),llvm::ISD::BITCAST,llvm::commonAlignment(),ComputePTXValueVTs(),DL,llvm::SmallVectorBase< Size_T >::empty(),llvm::ISD::EXTRACT_VECTOR_ELT,F,llvm::AttributeList::FirstArgIndex,llvm::PointerType::get(),llvm::SelectionDAG::getConstant(),llvm::SelectionDAG::getDataLayout(),llvm::EVT::getFixedSizeInBits(),llvm::MachineFunction::getFunction(),getFunctionArgumentAlignment(),llvm::SelectionDAG::getIntPtrConstant(),llvm::SelectionDAG::getLoad(),llvm::SelectionDAG::getMachineFunction(),llvm::SelectionDAG::getNode(),llvm::Constant::getNullValue(),llvm::TargetLoweringBase::getNumRegisters(),llvm::TargetLoweringBase::getPointerTy(),llvm::SelectionDAG::getRoot(),llvm::NVPTXSubtarget::getSmVersion(),llvm::NVPTXSubtarget::getTargetLowering(),llvm::EVT::getTypeForEVT(),llvm::TargetLoweringBase::getValueType(),llvm::EVT::getVectorVT(),llvm::AttributeList::hasParamAttr(),I,llvm::EVT::isInteger(),IsTypePassedAsArray(),llvm::Isv2x16VT(),llvm::Type::isVectorTy(),llvm::MachineMemOperand::MODereferenceable,llvm::MachineMemOperand::MOInvariant,llvm::NVPTXISD::MoveParam,P,PromoteScalarIntegerPTX(),llvm::SmallVectorTemplateBase< T, bool >::push_back(),PVF_FIRST,PVF_LAST,llvm::report_fatal_error(),llvm::SelectionDAG::setRoot(),llvm::ISD::SIGN_EXTEND,llvm::SmallVectorBase< Size_T >::size(),llvm::ISD::TokenFactor,llvm::ISD::TRUNCATE,llvm::ISD::UNDEF,VectorizePTXValueVTs(), andllvm::ISD::ZERO_EXTEND.
SDValue NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
Definition at line 1139 of file NVPTXISelLowering.cpp.
References llvm::GlobalAddressSDNode::getAddressSpace(), llvm::SelectionDAG::getDataLayout(), llvm::GlobalAddressSDNode::getGlobal(), llvm::SelectionDAG::getNode(), llvm::TargetLoweringBase::getPointerTy(), llvm::SelectionDAG::getTargetGlobalAddress(), and llvm::NVPTXISD::Wrapper.
Referenced by LowerOperation().
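From the references above, the lowering is essentially a wrap of the target global address in the NVPTX Wrapper node; the following is a close sketch rather than a verbatim copy.

SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GAN = cast<GlobalAddressSDNode>(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout(), GAN->getAddressSpace());
  SDValue GA = DAG.getTargetGlobalAddress(GAN->getGlobal(), DL, PtrVT);
  // Wrapper marks the address as already materialized for later selection.
  return DAG.getNode(NVPTXISD::Wrapper, DL, PtrVT, GA);
}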
NVPTXTargetLowering::LowerOperation() [override, virtual]
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
If the target has no operations that require custom lowering, it need not implement this. The default implementation of this aborts.
Reimplemented from llvm::TargetLowering.
Definition at line 2649 of file NVPTXISelLowering.cpp.
Referencesllvm::ISD::ABS,llvm::ISD::ADD,llvm::ISD::BITCAST,llvm::ISD::BR_JT,llvm::ISD::BUILD_VECTOR,llvm::ISD::CONCAT_VECTORS,llvm::ISD::CopyToReg,llvm::ISD::DYNAMIC_STACKALLOC,llvm::ISD::EXTRACT_SUBVECTOR,llvm::ISD::EXTRACT_VECTOR_ELT,llvm::ISD::FADD,llvm::ISD::FCOPYSIGN,llvm::ISD::FMUL,llvm::ISD::FP_EXTEND,llvm::ISD::FP_ROUND,llvm::ISD::FP_TO_SINT,llvm::ISD::FP_TO_UINT,llvm::ISD::FRAMEADDR,llvm::ISD::FROUND,llvm::ISD::FSUB,llvm::ISD::GlobalAddress,llvm::ISD::INSERT_VECTOR_ELT,llvm::ISD::INTRINSIC_W_CHAIN,llvm_unreachable,llvm::ISD::LOAD,LowerDYNAMIC_STACKALLOC(),LowerGlobalAddress(),LowerSTACKRESTORE(),LowerSTACKSAVE(),LowerVectorArith(),llvm::ISD::MUL,llvm::ISD::RETURNADDR,llvm::ISD::SELECT,llvm::ISD::SHL,llvm::ISD::SHL_PARTS,llvm::ISD::SINT_TO_FP,llvm::ISD::SMAX,llvm::ISD::SMIN,llvm::ISD::SRA_PARTS,llvm::ISD::SREM,llvm::ISD::SRL_PARTS,llvm::ISD::STACKRESTORE,llvm::ISD::STACKSAVE,llvm::ISD::STORE,llvm::ISD::SUB,llvm::ISD::UINT_TO_FP,llvm::ISD::UMAX,llvm::ISD::UMIN,llvm::ISD::UREM,llvm::ISD::VAARG,llvm::ISD::VASTART, andllvm::ISD::VECTOR_SHUFFLE.
NVPTXTargetLowering::LowerReturn() [override, virtual]
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
The implementation should return the resulting token chain value.
Reimplemented from llvm::TargetLowering.
Definition at line 3304 of file NVPTXISelLowering.cpp.
Referencesllvm::ISD::ANY_EXTEND,assert(),llvm::SmallVectorImpl< T >::clear(),llvm::commonAlignment(),ComputePTXValueVTs(),DL,llvm::SmallVectorBase< Size_T >::empty(),F,llvm::SelectionDAG::getConstant(),llvm::SelectionDAG::getDataLayout(),llvm::MachineFunction::getFunction(),getFunctionParamOptimizedAlign(),llvm::SelectionDAG::getMachineFunction(),llvm::SelectionDAG::getMemIntrinsicNode(),llvm::SelectionDAG::getNode(),llvm::Function::getReturnType(),llvm::NVPTXSubtarget::getSmVersion(),llvm::SDValue::getValueSizeInBits(),llvm::SDValue::getValueType(),llvm::SelectionDAG::getVTList(),llvm_unreachable,LowerUnalignedStoreRet(),llvm::MachineMemOperand::MOStore,PromoteScalarIntegerPTX(),llvm::SmallVectorTemplateBase< T, bool >::push_back(),PVF_FIRST,PVF_LAST,PVF_SCALAR,llvm::NVPTXISD::RET_GLUE,RetTy,llvm::ISD::SIGN_EXTEND,llvm::SmallVectorBase< Size_T >::size(),llvm::NVPTXISD::StoreRetval,llvm::NVPTXISD::StoreRetvalV2,llvm::NVPTXISD::StoreRetvalV4,VectorizePTXValueVTs(), andllvm::ISD::ZERO_EXTEND.
SDValue NVPTXTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Definition at line 2038 of file NVPTXISelLowering.cpp.
References llvm::NVPTXAS::ADDRESS_SPACE_GENERIC, llvm::NVPTXAS::ADDRESS_SPACE_LOCAL, llvm::LLVMContext::diagnose(), DL, llvm::SelectionDAG::getAddrSpaceCast(), llvm::SelectionDAG::getContext(), llvm::SelectionDAG::getDataLayout(), llvm::MachineFunction::getFunction(), llvm::SelectionDAG::getMachineFunction(), llvm::SelectionDAG::getNode(), llvm::TargetLoweringBase::getPointerTy(), llvm::NVPTXSubtarget::getPTXVersion(), llvm::NVPTXSubtarget::getSmVersion(), Ptr, and llvm::NVPTXISD::STACKRESTORE.
Referenced by LowerOperation().
SDValue NVPTXTargetLowering::LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Definition at line 2061 of file NVPTXISelLowering.cpp.
References llvm::NVPTXAS::ADDRESS_SPACE_GENERIC, llvm::NVPTXAS::ADDRESS_SPACE_LOCAL, llvm::LLVMContext::diagnose(), DL, llvm::SelectionDAG::getAddrSpaceCast(), llvm::SelectionDAG::getConstant(), llvm::SelectionDAG::getContext(), llvm::SelectionDAG::getDataLayout(), llvm::MachineFunction::getFunction(), llvm::SelectionDAG::getMachineFunction(), llvm::SelectionDAG::getMergeValues(), llvm::SelectionDAG::getNode(), llvm::TargetLoweringBase::getPointerTy(), llvm::NVPTXSubtarget::getPTXVersion(), llvm::NVPTXSubtarget::getSmVersion(), and llvm::NVPTXISD::STACKSAVE.
Referenced by LowerOperation().
NVPTXTargetLowering::shouldCastAtomicLoadInIR() [inline, override, virtual]
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 246 of file NVPTXISelLowering.h.
References llvm::TargetLoweringBase::None.
NVPTXTargetLowering::shouldCastAtomicStoreInIR() [inline, override, virtual]
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
For instance, AtomicExpansionKind::CastToInteger will try to cast the operands to integer values.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 250 of file NVPTXISelLowering.h.
References llvm::TargetLoweringBase::None.
NVPTXTargetLowering::shouldExpandAtomicRMWInIR() [override, virtual]
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Default is to never expand.
Reimplemented from llvm::TargetLoweringBase.
Definition at line 5522 of file NVPTXISelLowering.cpp.
Referencesllvm::AtomicRMWInst::Add,llvm::AtomicRMWInst::And,assert(),llvm::TargetLoweringBase::CmpXChg,llvm::AtomicRMWInst::FAdd,llvm::AtomicRMWInst::getOperation(),llvm::NVPTXSubtarget::getPTXVersion(),llvm::NVPTXSubtarget::getSmVersion(),llvm::Value::getType(),llvm::AtomicRMWInst::getValOperand(),llvm::NVPTXSubtarget::hasAtomAddF64(),llvm::NVPTXSubtarget::hasAtomBitwise64(),llvm::NVPTXSubtarget::hasAtomMinMax64(),llvm::Type::isBFloatTy(),llvm::Type::isDoubleTy(),llvm::AtomicRMWInst::isFloatingPointOperation(),llvm::Type::isFloatTy(),llvm::Type::isHalfTy(),llvm::Type::isIntegerTy(),llvm_unreachable,llvm::AtomicRMWInst::Max,llvm::AtomicRMWInst::Min,llvm::TargetLoweringBase::None,llvm::AtomicRMWInst::Or,llvm::AtomicRMWInst::Sub,llvm::AtomicRMWInst::UMax,llvm::AtomicRMWInst::UMin,llvm::AtomicRMWInst::Xchg, andllvm::AtomicRMWInst::Xor.
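A coarse sketch of the decision described above: operations PTX can perform natively stay as atomicrmw (None); anything else is rewritten by AtomicExpand into a cmpxchg loop. The SM/PTX-version gating visible in the references is deliberately omitted, so treat this as illustrative only.

AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
  Type *Ty = AI->getValOperand()->getType();
  if (AI->isFloatingPointOperation())
    return (AI->getOperation() == AtomicRMWInst::FAdd &&
            (Ty->isFloatTy() || Ty->isDoubleTy()))
               ? AtomicExpansionKind::None      // native atom.add.f32/f64
               : AtomicExpansionKind::CmpXChg;  // emulate via cmpxchg loop
  return (Ty->isIntegerTy(32) || Ty->isIntegerTy(64))
             ? AtomicExpansionKind::None
             : AtomicExpansionKind::CmpXChg;
}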
bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const
Definition at line 133 of file NVPTXISelLowering.cpp.
References llvm::MachineFunction::getDenormalMode(), llvm::APFloatBase::IEEEsingle(), llvm::DenormalMode::Output, and llvm::DenormalMode::PreserveSign.
Referenced by getSqrtEstimate().
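Matching the references above, the body is presumably equivalent to this sketch: flush-to-zero is used when the function's f32 denormal output mode is PreserveSign.

bool useF32FTZ(const MachineFunction &MF) const {
  // .ftz variants are selected when f32 denormal outputs are flushed to zero.
  return MF.getDenormalMode(APFloat::IEEEsingle()).Output ==
         DenormalMode::PreserveSign;
}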
bool NVPTXTargetLowering::usePrecSqrtF32() const
Definition at line 123 of file NVPTXISelLowering.cpp.
References llvm::cl::Option::getNumOccurrences(), llvm::TargetLoweringBase::getTargetMachine(), llvm::TargetMachine::Options, llvm::TargetOptions::UnsafeFPMath, and UsePrecSqrtF32.
Referenced by getSqrtEstimate().
const NVPTXTargetMachine *llvm::NVPTXTargetLowering::nvTM
Definition at line 193 of file NVPTXISelLowering.h.
Referenced by getPrototype(), LowerCall(), and LowerDYNAMIC_STACKALLOC().