//===--- SPIRVUtils.cpp ---- SPIR-V Utility Functions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicsSPIRV.h"

// The following functions are used to add these string literals as a series of
// 32-bit integer operands with the correct format, and unpack them if
// necessary when making string comparisons in compiler passes.
// SPIR-V requires null-terminated UTF-8 strings padded to 32-bit alignment.
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
  uint32_t Word = 0u; // Build up this 32-bit word from 4 8-bit chars.
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    uint8_t CharToAdd = 0;       // Initialize char as padding/null.
    if (StrIndex < Str.size()) { // If it's within the string, get a real char.
      CharToAdd = Str[StrIndex];
    }
    Word |= (CharToAdd << (WordIndex * 8));
  }
  return Word;
}

// Get length including padding and null terminator.
static size_t getPaddedLen(const StringRef &Str) {
  return (Str.size() + 4) & ~3;
}
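// Worked example (illustrative): the packing above is little-endian within
// each 32-bit word, and getPaddedLen() always leaves room for at least one
// trailing NUL byte:
//   convertCharsToWord("Hello", 0) == 0x6C6C6548 // 'H','e','l','l'
//   convertCharsToWord("Hello", 4) == 0x0000006F // 'o' plus three NUL bytes
//   getPaddedLen("Hello") == 8, getPaddedLen("word") == 8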
void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}

void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a vector element for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}

void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled
  else if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    // Asm Printer needs this info to print floating-type correctly
    if (Bitwidth == 16)
      MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH16);
    return;
  } else if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    return;
  }
  report_fatal_error("Unsupported constant bitwidth");
}
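// Illustration: a 64-bit immediate becomes two 32-bit operands, low word
// first. For instance, addNumImm(APInt(64, 0x0000000100000002), MIB) appends
// the operands 0x00000002 and 0x00000001 in that order.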
void buildOpName(Register Target, const StringRef &Name, MachineInstr &I,
                 const SPIRVInstrInfo &TII) {
  if (!Name.empty()) {
    auto MIB =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpName))
            .addUse(Target);
    addStringImm(Name, MIB);
  }
}
static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
                                  const std::vector<uint32_t> &DecArgs,
                                  StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}
void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expect operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
                         "element of the decoration");
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}
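// For illustration, a decoration node accepted by the loop above could look
// like the following (hypothetical values; 44 is the SPIR-V Alignment
// decoration):
//   !0 = !{!1}
//   !1 = !{i32 44, i32 16}
// which is emitted as "OpDecorate %reg Alignment 16".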
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I) {
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  MachineBasicBlock::iterator It = MBB->SkipPHIsAndLabels(MBB->begin()),
                              E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return It;
}
MachineBasicBlock::iterator getInsertPtValidEnd(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator I = MBB->end();
  if (I == MBB->begin())
    return I;
  --I;
  while (I->isTerminator() || I->isDebugValue()) {
    if (I == MBB->begin())
      break;
    --I;
  }
  return I;
}
SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
  switch (AddrSpace) {
  case 0:
    return SPIRV::StorageClass::Function;
  case 1:
    return SPIRV::StorageClass::CrossWorkgroup;
  case 2:
    return SPIRV::StorageClass::UniformConstant;
  case 3:
    return SPIRV::StorageClass::Workgroup;
  case 4:
    return SPIRV::StorageClass::Generic;
  case 5:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 6:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 7:
    return SPIRV::StorageClass::Input;
  case 8:
    return SPIRV::StorageClass::Output;
  case 9:
    return SPIRV::StorageClass::CodeSectionINTEL;
  case 10:
    return SPIRV::StorageClass::Private;
  default:
    report_fatal_error("Unknown address space");
  }
}
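// For example, an OpenCL global pointer (LLVM addrspace(1)) maps to the
// CrossWorkgroup storage class above, while a local pointer (addrspace(3))
// maps to Workgroup.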
SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable(nullptr);
}
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id) {
  // Named by
  // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#_scope_id.
  // We don't need aliases for Invocation and CrossDevice, as we already have
  // them covered by "singlethread" and "" strings respectively (see
  // implementation of LLVMContext::LLVMContext()).
  static const llvm::SyncScope::ID SubGroup =
      Ctx.getOrInsertSyncScopeID("subgroup");
  static const llvm::SyncScope::ID WorkGroup =
      Ctx.getOrInsertSyncScopeID("workgroup");
  static const llvm::SyncScope::ID Device =
      Ctx.getOrInsertSyncScopeID("device");

  if (Id == llvm::SyncScope::SingleThread)
    return SPIRV::Scope::Invocation;
  else if (Id == llvm::SyncScope::System)
    return SPIRV::Scope::CrossDevice;
  else if (Id == SubGroup)
    return SPIRV::Scope::Subgroup;
  else if (Id == WorkGroup)
    return SPIRV::Scope::Workgroup;
  else if (Id == Device)
    return SPIRV::Scope::Device;
  return SPIRV::Scope::CrossDevice;
}
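// Illustration: an atomic such as
//   store atomic i32 0, ptr %p syncscope("workgroup") seq_cst, align 4
// maps to SPIRV::Scope::Workgroup, while the default system scope ("") maps
// to SPIRV::Scope::CrossDevice.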
MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  }
  return MRI->getVRegDef(ConstReg);
}
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
    return GI->is(IntrinsicID);
  return false;
}
Type *getMDOperandAsType(const MDNode *N, unsigned I) {
  Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
  return toTypedPointer(ElementTy);
}
// The set of names is borrowed from the SPIR-V translator.
// TODO: may be implemented in SPIRVBuiltins.td.
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";
}
static bool isEnqueueKernelBI(const StringRef MangledName) {
  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";
}
static bool isKernelQueryBI(const StringRef MangledName) {
  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
}
static bool isNonMangledOCLBuiltin(StringRef Name) {
  if (!Name.starts_with("__"))
    return false;

  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // Otherwise use simple demangling to return the function name.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try to use the itanium demangler.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }

  // Autocheck C++, maybe need to do explicit check of the source language.
  // OpenCL C++ built-ins are declared in cl namespace.
  // TODO: consider using 'St' abbreviation for cl namespace mangling.
  // Similar to ::std:: in C++.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl:: namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
      .getAsInteger(10, Len);
  return Name.substr(Start, Len).str();
}
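// Illustration: a name starting with "__spirv_" is returned unchanged; for
// "_Z13get_global_idj" the itanium demangler returns the full signature
// "get_global_id(unsigned int)", and only if it fails does the fallback above
// read the length prefix "13" and return plain "get_global_id".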
bool hasBuiltinTypePrefix(StringRef Name) {
  if (Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
      Name.starts_with("spirv."))
    return true;
  return false;
}
bool isEntryPoint(const Function &F) {
  // OpenCL handling: any function with the SPIR_KERNEL
  // calling convention will be a potential entry point.
  if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
    return true;

  // HLSL handling: special attributes are emitted from the
  // front-end.
  if (F.getFnAttribute("hlsl.shader").isValid())
    return true;

  return false;
}
Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool") || TypeName.consume_front("_Bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("signed char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("signed short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("signed int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("signed long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half") ||
           TypeName.consume_front("_Float16") ||
           TypeName.consume_front("__fp16"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize SPIRV type name
  return nullptr;
}

std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  ToVisit.push(Start);

  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (Output.count(BB) != 0)
      continue;
    Output.insert(BB);

    for (BasicBlock *Successor : successors(BB)) {
      if (DT.dominates(Successor, BB))
        continue;
      ToVisit.push(Successor);
    }
  }

  return Output;
}
bool PartialOrderingVisitor::CanBeVisited(BasicBlock *BB) const {
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    // One of the predecessors hasn't been visited. Not ready yet.
    if (BlockToOrder.count(P) == 0)
      return false;

    // If the block is a loop exit, the loop must be finished before
    // we can continue.
    Loop *L = LI.getLoopFor(P);
    if (L == nullptr || L->contains(BB))
      continue;

    // SPIR-V requires a single back-edge. And the backend first
    // step transforms loops into the simplified format. If we have
    // more than 1 back-edge, something is wrong.
    assert(L->getNumBackEdges() <= 1);

    // If the loop has no latch, loop's rank won't matter, so we can
    // proceed.
    BasicBlock *Latch = L->getLoopLatch();
    if (Latch == nullptr)
      continue;

    // The latch is not ready yet, let's wait.
    if (BlockToOrder.count(Latch) == 0)
      return false;
  }

  return true;
}
size_t PartialOrderingVisitor::GetNodeRank(BasicBlock *BB) const {
  auto It = BlockToOrder.find(BB);
  if (It != BlockToOrder.end())
    return It->second.Rank;

  size_t result = 0;
  for (BasicBlock *P : predecessors(BB)) {
    // Ignore back-edges.
    if (DT.dominates(BB, P))
      continue;

    auto Iterator = BlockToOrder.end();
    Loop *L = LI.getLoopFor(P);
    BasicBlock *Latch = L ? L->getLoopLatch() : nullptr;

    // If the predecessor is either outside a loop, or part of
    // the same loop, simply take its rank + 1.
    if (L == nullptr || L->contains(BB) || Latch == nullptr) {
      Iterator = BlockToOrder.find(P);
    } else {
      // Otherwise, take the loop's rank (highest rank in the loop) as base.
      // Since loops have a single latch, highest rank is easy to find.
      // If the loop has no latch, then it doesn't matter.
      Iterator = BlockToOrder.find(Latch);
    }

    assert(Iterator != BlockToOrder.end());
    result = std::max(result, Iterator->second.Rank + 1);
  }

  return result;
}
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Unused) {
  ToVisit.push(BB);
  Queued.insert(BB);

  size_t QueueIndex = 0;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (!CanBeVisited(BB)) {
      ToVisit.push(BB);
      if (QueueIndex >= ToVisit.size())
        report_fatal_error(
            "No valid candidate in the queue. Is the graph reducible?");
      QueueIndex++;
      continue;
    }

    QueueIndex = 0;
    size_t Rank = GetNodeRank(BB);
    OrderInfo Info = {Rank, BlockToOrder.size()};
    BlockToOrder.emplace(BB, Info);

    for (BasicBlock *S : successors(BB)) {
      if (Queued.count(S) != 0)
        continue;
      ToVisit.push(S);
      Queued.insert(S);
    }
  }

  return 0;
}
PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}
bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}
void PartialOrderingVisitor::partialOrderVisit(
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skipping blocks with a rank inferior to |Start|'s rank.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;

  // This is unexpected. Worst case |Start| is the last block,
  // so It should point to the last block, not past-end.
  assert(It != Order.end());

  // By default, there is no rank limit. Setting it to the maximum value.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      break;

    if (Reachable.count(*It) == 0) {
      continue;
    }

    if (!Op(*It)) {
      EndRank = BlockToOrder[*It].Rank;
    }
  }
}
bool sortBlocks(Function &F) {
  if (F.size() == 0)
    return false;

  bool Modified = false;
  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  PartialOrderingVisitor Visitor(F);
  Visitor.partialOrderVisit(*F.begin(), [&Order](BasicBlock *Block) {
    Order.push_back(Block);
    return true;
  });

  assert(&*F.begin() == Order[0]);

  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      Modified = true;
      BB->moveAfter(LastBlock);
    }
    LastBlock = BB;
  }

  return Modified;
}
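// Illustration: for a diamond CFG (entry -> then/else -> merge), GetNodeRank
// gives the entry rank 0, both branches rank 1 and the merge block rank 2, so
// sortBlocks() lays out entry, then, else, merge, with ties broken by the
// traversal order recorded in TraversalIndex.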
MachineInstr *getVRegDef(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MaybeDef = MRI.getVRegDef(Reg);
  if (MaybeDef && MaybeDef->getOpcode() == SPIRV::ASSIGN_TYPE)
    MaybeDef = MRI.getVRegDef(MaybeDef->getOperand(1).getReg());
  return MaybeDef;
}
bool getVacantFunctionName(Module &M, std::string &Name) {
  // It's a bit of paranoia, but still we don't want to have even a chance that
  // the loop will work for too long.
  constexpr unsigned MaxIters = 1024;
  for (unsigned I = 0; I < MaxIters; ++I) {
    std::string OrdName = Name + Twine(I).str();
    if (!M.getFunction(OrdName)) {
      Name = OrdName;
      return true;
    }
  }
  return false;
}
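// Illustration: getVacantFunctionName(M, Name) with Name == "foo" probes
// "foo0", "foo1", ... and rewrites Name to the first candidate that does not
// already exist in the module.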
// Assign SPIR-V type to the register. If the register has no valid assigned
// class, set register LLT type and class according to the SPIR-V type.
void setRegClassType(Register Reg, SPIRVType *SpvType, SPIRVGlobalRegistry *GR,
                     MachineRegisterInfo *MRI, const MachineFunction &MF,
                     bool Force) {
  GR->assignSPIRVTypeToVReg(SpvType, Reg, MF);
  if (!MRI->getRegClassOrNull(Reg) || Force) {
    MRI->setRegClass(Reg, GR->getRegClass(SpvType));
    MRI->setType(Reg, GR->getRegType(SpvType));
  }
}
// Create a SPIR-V type, assign SPIR-V type to the register. If the register
// has no valid assigned class, set register LLT type and class according to
// the SPIR-V type.

// Create a virtual register and assign SPIR-V type to the register. Set
// register LLT type and class according to the SPIR-V type.

// Create a SPIR-V type, virtual register and assign SPIR-V type to the
// register. Set register LLT type and class according to the SPIR-V type.

// Return true if there is an opaque pointer type nested in the argument.
bool isNestedPointer(const Type *Ty) {
  if (Ty->isPtrOrPtrVectorTy())
    return true;
  if (const FunctionType *RefTy = dyn_cast<FunctionType>(Ty)) {
    if (isNestedPointer(RefTy->getReturnType()))
      return true;
    for (const Type *ArgTy : RefTy->params())
      if (isNestedPointer(ArgTy))
        return true;
    return false;
  }
  if (const ArrayType *RefTy = dyn_cast<ArrayType>(Ty))
    return isNestedPointer(RefTy->getElementType());
  return false;
}
bool isSpvIntrinsic(const Value *Arg) {
  if (const auto *II = dyn_cast<IntrinsicInst>(Arg))
    if (Function *F = II->getCalledFunction())
      if (F->getName().starts_with("llvm.spv."))
        return true;
  return false;
}