//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-builtins"

#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

  InstructionSet::InstructionSet Set;
#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

  InstructionSet::InstructionSet Set;
#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

  InstructionSet::InstructionSet Set;

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

  InstructionSet::InstructionSet Set;

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

  InstructionSet::InstructionSet Set;

  InstructionSet::InstructionSet Set;

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records.
//===----------------------------------------------------------------------===//

/// Parses the name part of the demangled builtin call.
  const static std::string PassPrefix =
"(anonymous namespace)::";
179 std::string BuiltinName;
180// Itanium Demangler result may have "(anonymous namespace)::" prefix 182 BuiltinName = DemangledCall.
substr(PassPrefix.length());
184 BuiltinName = DemangledCall;
185// Extract the builtin function name and types of arguments from the call 187 BuiltinName = BuiltinName.
substr(0, BuiltinName.find(
'('));
  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);
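  // Note: 12 is the length of the "__spirv_ocl_" prefix matched above, so the
  // prefix is dropped (for example, "__spirv_ocl_fabs" becomes "fabs").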
  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template - needs to have
  // the information after angle brackets and return type removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }
  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*");
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
    BuiltinName = SubMatch.str();
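  // Illustrative examples of the normalization above (names assumed for
  // illustration only):
  //   "__spirv_ImageRead_Rint2"  -> "__spirv_ImageRead"
  //   "__spirv_SDotKHR_Rint"     -> "__spirv_SDotKHR"
  // i.e. the trailing "_R<type>" return-type annotation is stripped.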
/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
               SPIRV::InstructionSet::InstructionSet Set,
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    switch (FirstArgumentType) {
      if (Set == SPIRV::InstructionSet::OpenCL_std)
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
      if (Set == SPIRV::InstructionSet::OpenCL_std)
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
319 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
320return std::make_unique<SPIRV::IncomingCall>(
321 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
323// If lookup with a prefix failed, find a suffix to be added to the 324// builtin's name for lookup. For example, OpenCL "group_reduce_max" taking 325// an unsigned value has a suffix "u". 328switch (FirstArgumentType) {
348// If argument-type name suffix was added, look up the builtin again. 349if (!Suffix.empty() &&
350 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
351return std::make_unique<SPIRV::IncomingCall>(
352 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
  // No builtin with such name was found in the set.

  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.

// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
  assert(Ty && "Type is expected");
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which this is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  report_fatal_error(
      "Blocks in OpenCL C must be traceable to allocation site");
425//===----------------------------------------------------------------------===// 426// Helper functions for building misc instructions 427//===----------------------------------------------------------------------===// 429/// Helper function building either a resulting scalar or vector bool register 430/// depending on the expected \p ResultType. 432/// \returns Tuple of the resulting register and its type. 433static std::tuple<Register, SPIRVType *>
439if (ResultType->
getOpcode() == SPIRV::OpTypeVector) {
454return std::make_tuple(ResultRegister, BoolType);
457/// Helper function for building either a vector or scalar select instruction 458/// depending on the expected \p ResultType. 465if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
475return MIRBuilder.
buildSelect(ReturnRegister, SourceRegister, TrueConst,
479/// Helper function for building a load instruction loading into the 480/// \p DestinationReg. 485if (!DestinationReg.isValid())
487// TODO: consider using correct address space and alignment (p0 is canonical 488// type for selection though). 490 MIRBuilder.
buildLoad(DestinationReg, PtrRegister, PtrInfo,
Align());
491return DestinationReg;
494/// Helper function for building a load instruction for loading a builtin global 495/// variable of \p BuiltinValue value. 507 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
510// Set up the global OpVariable with the necessary builtin decorations. 513 SPIRV::StorageClass::Input,
nullptr,
/* isConst= */ isConst,
514/* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
517// Load the value from the global variable. 521return LoadedRegister;
/// Helper external function for inserting ASSIGN_TYPE instruction between
/// \p Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided, use
/// it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
/// SPIRVPreLegalizer.cpp.
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);
534// TODO: Move to TableGen. 535static SPIRV::MemorySemantics::MemorySemantics
538case std::memory_order_relaxed:
539return SPIRV::MemorySemantics::None;
540case std::memory_order_acquire:
541return SPIRV::MemorySemantics::Acquire;
542case std::memory_order_release:
543return SPIRV::MemorySemantics::Release;
544case std::memory_order_acq_rel:
545return SPIRV::MemorySemantics::AcquireRelease;
546case std::memory_order_seq_cst:
547return SPIRV::MemorySemantics::SequentiallyConsistent;
555case SPIRV::CLMemoryScope::memory_scope_work_item:
556return SPIRV::Scope::Invocation;
557case SPIRV::CLMemoryScope::memory_scope_work_group:
558return SPIRV::Scope::Workgroup;
559case SPIRV::CLMemoryScope::memory_scope_device:
560return SPIRV::Scope::Device;
561case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
562return SPIRV::Scope::CrossDevice;
563case SPIRV::CLMemoryScope::memory_scope_sub_group:
564return SPIRV::Scope::Subgroup;
577 SPIRV::Scope::Scope Scope,
581if (CLScopeRegister.
isValid()) {
586if (CLScope ==
static_cast<unsigned>(Scope)) {
587MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
588return CLScopeRegister;
596if (
MRI->getRegClassOrNull(
Reg))
600 SpvType ? GR->
getRegClass(SpvType) : &SPIRV::iIDRegClass);
604Register PtrRegister,
unsigned &Semantics,
607if (SemanticsRegister.
isValid()) {
609 std::memory_order Order =
614if (
static_cast<unsigned>(Order) == Semantics) {
615MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
616return SemanticsRegister;
629unsigned Sz = Call->Arguments.size() - ImmArgs.size();
630for (
unsigned i = 0; i < Sz; ++i)
631 MIB.addUse(Call->Arguments[i]);
637/// Helper function for translating atomic init to OpStore. 640if (Call->isSpirvOp())
643assert(Call->Arguments.size() == 2 &&
644"Need 2 arguments for atomic init translation");
646 .
addUse(Call->Arguments[0])
647 .
addUse(Call->Arguments[1]);
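  // For example (illustrative): an OpenCL C call "atomic_init(&obj, value)" is
  // translated by this helper into a single "OpStore %obj %value".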
651/// Helper function for building an atomic load instruction. 656if (Call->isSpirvOp())
659Register PtrRegister = Call->Arguments[0];
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
      Call->Arguments.size() > 1
668if (Call->Arguments.size() > 2) {
669// TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad. 670 MemSemanticsReg = Call->Arguments[2];
673 SPIRV::MemorySemantics::SequentiallyConsistent |
679 .
addDef(Call->ReturnRegister)
687/// Helper function for building an atomic store instruction. 691if (Call->isSpirvOp())
696Register PtrRegister = Call->Arguments[0];
698 SPIRV::MemorySemantics::SequentiallyConsistent |
705 .
addUse(Call->Arguments[1]);
709/// Helper function for building an atomic compare-exchange instruction. 713if (Call->isSpirvOp())
717bool IsCmpxchg = Call->Builtin->Name.contains(
"cmpxchg");
720Register ObjectPtr = Call->Arguments[0];
// Pointer (volatile A *object.) 721Register ExpectedArg = Call->Arguments[1];
// Comparator (C* expected). 722Register Desired = Call->Arguments[2];
// Value (C Desired). 724LLT DesiredLLT =
MRI->getType(Desired);
727 SPIRV::OpTypePointer);
730assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
731 : ExpectedType == SPIRV::OpTypePointer);
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
744 ? SPIRV::MemorySemantics::None
745 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
748 ? SPIRV::MemorySemantics::None
749 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
750if (Call->Arguments.size() >= 4) {
751assert(Call->Arguments.size() >= 5 &&
752"Need 5+ args for explicit atomic cmpxchg");
759if (
static_cast<unsigned>(MemOrdEq) == MemSemEqual)
760 MemSemEqualReg = Call->Arguments[3];
761if (
static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
762 MemSemUnequalReg = Call->Arguments[4];
766if (!MemSemUnequalReg.
isValid())
770auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
771if (Call->Arguments.size() >= 6) {
772assert(Call->Arguments.size() == 6 &&
773"Extra args for explicit atomic cmpxchg");
774auto ClScope =
static_cast<SPIRV::CLMemoryScope
>(
777if (ClScope ==
static_cast<unsigned>(Scope))
778 ScopeReg = Call->Arguments[5];
788Register Tmp = !IsCmpxchg ?
MRI->createGenericVirtualRegister(DesiredLLT)
789 : Call->ReturnRegister;
790if (!
MRI->getRegClassOrNull(Tmp))
811/// Helper function for building atomic instructions. 815if (Call->isSpirvOp())
821 Call->Arguments.size() >= 4 ? Call->Arguments[3] :
Register();
823assert(Call->Arguments.size() <= 4 &&
824"Too many args for explicit atomic RMW");
825 ScopeRegister =
buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
828Register PtrRegister = Call->Arguments[0];
829unsigned Semantics = SPIRV::MemorySemantics::None;
831 Call->Arguments.size() >= 3 ? Call->Arguments[2] :
Register();
833 Semantics, MIRBuilder, GR);
834Register ValueReg = Call->Arguments[1];
836// support cl_ext_float_atomics 837if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
838if (Opcode == SPIRV::OpAtomicIAdd) {
839 Opcode = SPIRV::OpAtomicFAddEXT;
840 }
else if (Opcode == SPIRV::OpAtomicISub) {
841// Translate OpAtomicISub applied to a floating type argument to 842// OpAtomicFAddEXT with the negative value operand 843 Opcode = SPIRV::OpAtomicFAddEXT;
845MRI->createGenericVirtualRegister(
MRI->getType(ValueReg));
854 ValueReg = NegValueReg;
858 .
addDef(Call->ReturnRegister)
867/// Helper function for building an atomic floating-type instruction. 872assert(Call->Arguments.size() == 4 &&
873"Wrong number of atomic floating-type builtin");
874Register PtrReg = Call->Arguments[0];
875Register ScopeReg = Call->Arguments[1];
876Register MemSemanticsReg = Call->Arguments[2];
877Register ValueReg = Call->Arguments[3];
879 .
addDef(Call->ReturnRegister)
888/// Helper function for building atomic flag instructions (e.g. 889/// OpAtomicFlagTestAndSet). 893bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
895if (Call->isSpirvOp())
900Register PtrRegister = Call->Arguments[0];
901unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
903 Call->Arguments.size() >= 2 ? Call->Arguments[1] :
Register();
905 Semantics, MIRBuilder, GR);
907assert((Opcode != SPIRV::OpAtomicFlagClear ||
908 (Semantics != SPIRV::MemorySemantics::Acquire &&
909 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
910"Invalid memory order argument!");
913 Call->Arguments.size() >= 3 ? Call->Arguments[2] :
Register();
925/// Helper function for building barriers, i.e., memory/control ordering 933if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
934 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
935 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
936 std::string DiagMsg = std::string(Builtin->
Name) +
937": the builtin requires the following SPIR-V " 938"extension: SPV_INTEL_split_barrier";
942if (Call->isSpirvOp())
947unsigned MemSemantics = SPIRV::MemorySemantics::None;
949if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
950 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
952if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
953 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
955if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
956 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
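  // For example (illustrative): barrier(CLK_LOCAL_MEM_FENCE |
  // CLK_GLOBAL_MEM_FENCE) sets both the WorkgroupMemory and
  // CrossWorkgroupMemory bits before the ordering bits are added below.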
958if (Opcode == SPIRV::OpMemoryBarrier)
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
963 MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
965 MemSemantics |= SPIRV::MemorySemantics::Acquire;
967 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
970 MemFlags == MemSemantics
974 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
975 SPIRV::Scope::Scope MemScope = Scope;
976if (Call->Arguments.size() >= 2) {
978 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
979 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
980"Extra args for explicitly scoped barrier");
981Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
982 : Call->Arguments[1];
983 SPIRV::CLMemoryScope CLScope =
986if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
987 (Opcode == SPIRV::OpMemoryBarrier))
989if (CLScope ==
static_cast<unsigned>(Scope))
990 ScopeReg = Call->Arguments[1];
997if (Opcode != SPIRV::OpMemoryBarrier)
999 MIB.
addUse(MemSemanticsReg);
1005case SPIRV::Dim::DIM_1D:
1006case SPIRV::Dim::DIM_Buffer:
1008case SPIRV::Dim::DIM_2D:
1009case SPIRV::Dim::DIM_Cube:
1010case SPIRV::Dim::DIM_Rect:
1012case SPIRV::Dim::DIM_3D:
1019/// Helper function for obtaining the number of size components. 1025return arrayed ? numComps + 1 : numComps;
1028//===----------------------------------------------------------------------===// 1029// Implementation functions for each builtin group 1030//===----------------------------------------------------------------------===// 1035// Lookup the extended instruction number in the TableGen records. 1038 SPIRV::lookupExtendedBuiltin(Builtin->
Name, Builtin->
Set)->Number;
1040// Build extended instruction. 1043 .
addDef(Call->ReturnRegister)
1045 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1048for (
auto Argument : Call->Arguments)
1056// Lookup the instruction opcode in the TableGen records. 1059 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1063 std::tie(CompareRegister, RelationType) =
1066// Build relational instruction. 1071for (
auto Argument : Call->Arguments)
1074// Build select instruction. 1075returnbuildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1076 Call->ReturnType, GR);
1084 SPIRV::lookupGroupBuiltin(Builtin->
Name);
1087if (Call->isSpirvOp()) {
1092// Group Operation is a literal 1093Register GroupOpReg = Call->Arguments[1];
1095if (!
MI ||
MI->getOpcode() != TargetOpcode::G_CONSTANT)
1097"Group Operation parameter must be an integer constant");
1098uint64_t GrpOp =
MI->getOperand(1).getCImm()->getValue().getZExtValue();
1099Register ScopeReg = Call->Arguments[0];
1101 .
addDef(Call->ReturnRegister)
1105for (
unsigned i = 2; i < Call->Arguments.size(); ++i)
1106 MIB.
addUse(Call->Arguments[i]);
1113Register BoolReg = Call->Arguments[0];
1118if (ArgInstruction->
getOpcode() == TargetOpcode::G_CONSTANT) {
1119if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool)
1123if (BoolRegType->
getOpcode() == SPIRV::OpTypeInt) {
1125MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1131 }
else if (BoolRegType->
getOpcode() != SPIRV::OpTypeBool) {
1134// if BoolReg is a boolean register, we don't need to do anything 1138Register GroupResultRegister = Call->ReturnRegister;
1139SPIRVType *GroupResultType = Call->ReturnType;
1141// TODO: maybe we need to check whether the result type is already boolean 1142// and in this case do not insert select instruction. 1143constbool HasBoolReturnTy =
1149 std::tie(GroupResultRegister, GroupResultType) =
1152auto Scope = Builtin->
Name.
starts_with(
"sub_group") ? SPIRV::Scope::Subgroup
1153 : SPIRV::Scope::Workgroup;
1157if (GroupBuiltin->
Opcode == SPIRV::OpGroupBroadcast &&
1158 Call->Arguments.size() > 2) {
1159// For OpGroupBroadcast "LocalId must be an integer datatype. It must be a 1160// scalar, a vector with 2 components, or a vector with 3 components.", 1161// meaning that we must create a vector from the function arguments if 1162// it's a work_group_broadcast(val, local_id_x, local_id_y) or 1163// work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call. 1164Register ElemReg = Call->Arguments[1];
1166if (!ElemType || ElemType->
getOpcode() != SPIRV::OpTypeInt)
1168unsigned VecLen = Call->Arguments.size() - 1;
1169 VecReg =
MRI->createGenericVirtualRegister(
1171MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1177for (
unsigned i = 1; i < Call->Arguments.size(); i++) {
1178 MIB.
addUse(Call->Arguments[i]);
1185// Build work/sub group instruction. 1187 .
addDef(GroupResultRegister)
1193if (Call->Arguments.size() > 0) {
1199for (
unsigned i = 1; i < Call->Arguments.size(); i++)
1200 MIB.addUse(Call->Arguments[i]);
1203// Build select instruction. 1206 Call->ReturnType, GR);
1217 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->
Name);
1220 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1221 std::string DiagMsg = std::string(Builtin->
Name) +
1222": the builtin requires the following SPIR-V " 1223"extension: SPV_INTEL_media_block_io";
1225 }
else if (!IntelSubgroups->
IsMedia &&
1226 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1227 std::string DiagMsg = std::string(Builtin->
Name) +
1228": the builtin requires the following SPIR-V " 1229"extension: SPV_INTEL_subgroups";
1234if (Call->isSpirvOp()) {
1235bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1236 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1237 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
  // Minimal number of arguments set in TableGen records is 1
  if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1247// TODO: add required validation from the specification: 1248// "'Image' must be an object whose type is OpTypeImage with a 'Sampled' 1249// operand of 0 or 2. If the 'Sampled' operand is 2, then some 1250// dimensions require a capability." 1252case SPIRV::OpSubgroupBlockReadINTEL:
1253 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1255case SPIRV::OpSubgroupBlockWriteINTEL:
1256 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1263// TODO: opaque pointers types should be eventually resolved in such a way 1264// that validation of block read is enabled with respect to the following 1265// specification requirement: 1266// "'Result Type' may be a scalar or vector type, and its component type must 1267// be equal to the type pointed to by 'Ptr'." 1268// For example, function parameter type should not be default i8 pointer, but 1269// depend on the result type of the instruction where it is used as a pointer 1270// argument of OpSubgroupBlockReadINTEL 1272// Build Intel subgroups instruction 1277 .
addDef(Call->ReturnRegister)
1279for (
size_t i = 0; i < Call->Arguments.size(); ++i)
1280 MIB.
addUse(Call->Arguments[i]);
1290if (!ST->canUseExtension(
1291 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1292 std::string DiagMsg = std::string(Builtin->
Name) +
1293": the builtin requires the following SPIR-V " 1294"extension: SPV_KHR_uniform_group_instructions";
1298 SPIRV::lookupGroupUniformBuiltin(Builtin->
Name);
1301Register GroupResultReg = Call->ReturnRegister;
1302Register ScopeReg = Call->Arguments[0];
1303Register ValueReg = Call->Arguments[2];
1306Register ConstGroupOpReg = Call->Arguments[1];
1308if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1310"expect a constant group operation for a uniform group instruction",
1313if (!ConstOperand.
isCImm())
1323 MIB.addUse(ValueReg);
1334if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1335 std::string DiagMsg = std::string(Builtin->
Name) +
1336": the builtin requires the following SPIR-V " 1337"extension: SPV_KHR_shader_clock";
1341Register ResultReg = Call->ReturnRegister;
  // Deduce the `Scope` operand from the builtin function name.
  SPIRV::Scope::Scope ScopeArg =
      StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
          .EndsWith("device", SPIRV::Scope::Scope::Device)
          .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
          .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return defaultVal (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
//  %res = OpConstant %SizeT 0
//
// For other indices we generate:
//  %g = OpVariable %ptr_V3_SizeT Input
//  OpDecorate %g BuiltIn XXX
//  OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//  OpDecorate %g Constant
//  %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//  %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//  %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//  %cmp = OpULessThan %bool %idx %const_3
//  %res = OpSelect %SizeT %cmp %tmp %const_0
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
1390Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1398// Set up the final register to do truncation or extension on at the end. 1399Register ToTruncate = Call->ReturnRegister;
1401// If the index is constant, we can statically determine if it is in range. 1402bool IsConstantIndex =
1403 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1405// If it's out of range (max dimension is 3), we can just return the constant 1406// default value (0 or 1 depending on which query function). 1408Register DefaultReg = Call->ReturnRegister;
1409if (PointerSize != ResultWidth) {
1410 DefaultReg =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1411MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1413 MIRBuilder.
getMF());
1414 ToTruncate = DefaultReg;
1418 MIRBuilder.
buildCopy(DefaultReg, NewRegister);
1419 }
else {
// If it could be in range, we need to load from the given builtin. 1425// Set up the vreg to extract the result to (possibly a new temporary one). 1426Register Extracted = Call->ReturnRegister;
1427if (!IsConstantIndex || PointerSize != ResultWidth) {
1428 Extracted =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
1429MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1432// Use Intrinsic::spv_extractelt so dynamic vs static extraction is 1433// handled later: extr = spv_extractelt LoadedVector, IndexRegister. 1436 ExtractInst.
addUse(LoadedVector).
addUse(IndexRegister);
1438// If the index is dynamic, need check if it's < 3, and then use a select. 1439if (!IsConstantIndex) {
1448MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1451// Use G_ICMP to check if idxVReg < 3. 1455// Get constant for the default value (0 or 1 depending on which 1460// Get a register for the selection result (possibly a new temporary one). 1461Register SelectionResult = Call->ReturnRegister;
1462if (PointerSize != ResultWidth) {
1465MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1467 MIRBuilder.
getMF());
1469// Create the final G_SELECT to return the extracted value or the default. 1470 MIRBuilder.
buildSelect(SelectionResult, CompareRegister, Extracted,
1472 ToTruncate = SelectionResult;
1474 ToTruncate = Extracted;
1477// Alter the result's bitwidth if it does not match the SizeT value extracted. 1478if (PointerSize != ResultWidth)
1486// Lookup the builtin variable record. 1488 SPIRV::BuiltIn::BuiltIn
Value =
1489 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1491if (
Value == SPIRV::BuiltIn::GlobalInvocationId)
1494// Build a load instruction for the builtin variable. 1497if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1504 LLType, Call->ReturnRegister);
1510// Lookup the instruction opcode in the TableGen records. 1513 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1518case SPIRV::OpAtomicLoad:
1520case SPIRV::OpAtomicStore:
1522case SPIRV::OpAtomicCompareExchange:
1523case SPIRV::OpAtomicCompareExchangeWeak:
1526case SPIRV::OpAtomicIAdd:
1527case SPIRV::OpAtomicISub:
1528case SPIRV::OpAtomicOr:
1529case SPIRV::OpAtomicXor:
1530case SPIRV::OpAtomicAnd:
1531case SPIRV::OpAtomicExchange:
1533case SPIRV::OpMemoryBarrier:
1535case SPIRV::OpAtomicFlagTestAndSet:
1536case SPIRV::OpAtomicFlagClear:
1539if (Call->isSpirvOp())
1549// Lookup the instruction opcode in the TableGen records. 1551unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->
Name)->Opcode;
1554case SPIRV::OpAtomicFAddEXT:
1555case SPIRV::OpAtomicFMinEXT:
1556case SPIRV::OpAtomicFMaxEXT:
1566// Lookup the instruction opcode in the TableGen records. 1569 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1576 MIRBuilder.
buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1577 .
addDef(Call->ReturnRegister)
1578 .
addUse(Call->Arguments[0]);
1585if (Call->isSpirvOp())
1589bool IsVec = Opcode == SPIRV::OpTypeVector;
1590// Use OpDot only in case of vector args and OpFMul in case of scalar args. 1591 MIRBuilder.
buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1592 .
addDef(Call->ReturnRegister)
1594 .
addUse(Call->Arguments[0])
1595 .
addUse(Call->Arguments[1]);
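  // For example (illustrative): OpenCL "dot(float4, float4)" lowers to OpDot,
  // while the scalar form "dot(float, float)" lowers to OpFMulS instead.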
1603 SPIRV::BuiltIn::BuiltIn
Value =
1604 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1606// For now, we only support a single Wave intrinsic with a single return type. 1607assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1611 MIRBuilder, Call->ReturnType, GR,
Value, LLType, Call->ReturnRegister,
1612/* isConst= */false,
/* hasLinkageTy= */false);
1615// We expect a builtin 1616// Name(ptr sret([RetType]) %result, Type %operand1, Type %operand1) 1617// where %result is a pointer to where the result of the builtin execution 1618// is to be stored, and generate the following instructions: 1619// Res = Opcode RetType Operand1 Operand1 1620// OpStore RetVariable Res 1626 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1628Register SRetReg = Call->Arguments[0];
1633if (RetType->
getOpcode() != SPIRV::OpTypeStruct)
1635"overflow builtins");
1639if (!OpType1 || !OpType2 || OpType1 != OpType2)
1641if (OpType1->
getOpcode() == SPIRV::OpTypeVector)
1643case SPIRV::OpIAddCarryS:
1644 Opcode = SPIRV::OpIAddCarryV;
1646case SPIRV::OpISubBorrowS:
1647 Opcode = SPIRV::OpISubBorrowV;
1652Register ResReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1654MRI->getRegClassOrNull(Call->Arguments[1])) {
1655MRI->setRegClass(ResReg, DstRC);
1656MRI->setType(ResReg,
MRI->getType(Call->Arguments[1]));
1664 .
addUse(Call->Arguments[1])
1665 .
addUse(Call->Arguments[2]);
1673// Lookup the builtin record. 1674 SPIRV::BuiltIn::BuiltIn
Value =
1675 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->
Value;
1677Value == SPIRV::BuiltIn::WorkgroupSize ||
1678Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1685// Lookup the image size query component number in the TableGen records. 1688 SPIRV::lookupImageQueryBuiltin(Builtin->
Name, Builtin->
Set)->Component;
1689// Query result may either be a vector or a scalar. If return type is not a 1690// vector, expect only a single size component. Otherwise get the number of 1691// expected components. 1693unsigned NumExpectedRetComponents =
RetTy->getOpcode() == SPIRV::OpTypeVector
1694 ?
RetTy->getOperand(2).getImm()
1696// Get the actual number of query result/size components. 1699Register QueryResult = Call->ReturnRegister;
1700SPIRVType *QueryResultType = Call->ReturnType;
1701if (NumExpectedRetComponents != NumActualRetComponents) {
1707 IntTy, NumActualRetComponents, MIRBuilder);
1712 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1716 .
addUse(Call->Arguments[0]);
1719if (NumExpectedRetComponents == NumActualRetComponents)
1721if (NumExpectedRetComponents == 1) {
1722// Only 1 component is expected, build OpCompositeExtract instruction. 1723unsigned ExtractedComposite =
1724 Component == 3 ? NumActualRetComponents - 1 : Component;
1725assert(ExtractedComposite < NumActualRetComponents &&
1726"Invalid composite index!");
1729if (QueryResultType->
getOpcode() == SPIRV::OpTypeVector) {
1731if (TypeReg != NewTypeReg &&
1733 TypeReg = NewTypeReg;
1735 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
1736 .
addDef(Call->ReturnRegister)
1739 .
addImm(ExtractedComposite);
1740if (NewType !=
nullptr)
1744// More than 1 component is expected, fill a new vector. 1745auto MIB = MIRBuilder.
buildInstr(SPIRV::OpVectorShuffle)
1746 .
addDef(Call->ReturnRegister)
1750for (
unsigned i = 0; i < NumExpectedRetComponents; ++i)
1751 MIB.
addImm(i < NumActualRetComponents ? i : 0xffffffff);
1759assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1760"Image samples query result must be of int type!");
1762// Lookup the instruction opcode in the TableGen records. 1765 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1767Register Image = Call->Arguments[0];
1768 SPIRV::Dim::Dim ImageDimensionality =
static_cast<SPIRV::Dim::Dim
>(
1770 (void)ImageDimensionality;
1773case SPIRV::OpImageQuerySamples:
1774assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1775"Image must be of 2D dimensionality");
1777case SPIRV::OpImageQueryLevels:
1778assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1779 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1780 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1781 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1782"Image must be of 1D/2D/3D/Cube dimensionality");
1787 .
addDef(Call->ReturnRegister)
1793// TODO: Move to TableGen. 1794static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1796switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1797case SPIRV::CLK_ADDRESS_CLAMP:
1798return SPIRV::SamplerAddressingMode::Clamp;
1799case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1800return SPIRV::SamplerAddressingMode::ClampToEdge;
1801case SPIRV::CLK_ADDRESS_REPEAT:
1802return SPIRV::SamplerAddressingMode::Repeat;
1803case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1804return SPIRV::SamplerAddressingMode::RepeatMirrored;
1805case SPIRV::CLK_ADDRESS_NONE:
1806return SPIRV::SamplerAddressingMode::None;
1813return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1816static SPIRV::SamplerFilterMode::SamplerFilterMode
1818if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1819return SPIRV::SamplerFilterMode::Linear;
1820if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1821return SPIRV::SamplerFilterMode::Nearest;
1822return SPIRV::SamplerFilterMode::Nearest;
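// For example (illustrative): a sampler bitmask of CLK_NORMALIZED_COORDS_TRUE
// | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR decodes, via the three helpers
// above, to addressing mode Clamp, normalized coordinates 1, and filter mode
// Linear.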
1829Register Image = Call->Arguments[0];
1834Register Sampler = Call->Arguments[1];
1848Register SampledImage =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1859if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
1866 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
1870 .
addUse(Call->Arguments[2])
// Coordinate. 1871 .
addImm(SPIRV::ImageOperand::Lod)
1873 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
1874 .
addDef(Call->ReturnRegister)
1879 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
1880 .
addDef(Call->ReturnRegister)
1883 .
addUse(Call->Arguments[2])
// Coordinate. 1884 .
addImm(SPIRV::ImageOperand::Lod)
1889 .
addDef(Call->ReturnRegister)
1892 .
addUse(Call->Arguments[1])
// Coordinate. 1893 .
addImm(SPIRV::ImageOperand::Sample)
1894 .
addUse(Call->Arguments[2]);
1897 .
addDef(Call->ReturnRegister)
1900 .
addUse(Call->Arguments[1]);
      // Coordinate.
      .addUse(Call->Arguments[0])  // Image.
      .addUse(Call->Arguments[1])  // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.

  if (Call->Builtin->Name.contains_insensitive(
1921"__translate_sampler_initializer")) {
1922// Build sampler literal. 1928return Sampler.isValid();
1929 }
else if (Call->Builtin->Name.contains_insensitive(
"__spirv_SampledImage")) {
1930// Create OpSampledImage. 1931Register Image = Call->Arguments[0];
1936 Call->ReturnRegister.isValid()
1937 ? Call->ReturnRegister
1938 :
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1943 .
addUse(Call->Arguments[1]);
// Sampler. 1945 }
else if (Call->Builtin->Name.contains_insensitive(
1946"__spirv_ImageSampleExplicitLod")) {
1947// Sample an image using an explicit level of detail. 1948 std::string ReturnType = DemangledCall.
str();
1950 ReturnType = ReturnType.substr(ReturnType.find(
"_R") + 2);
1951 ReturnType = ReturnType.substr(0, ReturnType.find(
'('));
1958 std::string DiagMsg =
1959"Unable to recognize SPIRV type name: " + ReturnType;
1962 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
1963 .
addDef(Call->ReturnRegister)
1965 .
addUse(Call->Arguments[0])
// Image. 1966 .
addUse(Call->Arguments[1])
// Coordinate. 1967 .
addImm(SPIRV::ImageOperand::Lod)
1968 .
addUse(Call->Arguments[3]);
1976 MIRBuilder.
buildSelect(Call->ReturnRegister, Call->Arguments[0],
1977 Call->Arguments[1], Call->Arguments[2]);
1993 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1994bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
1995 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
1996 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
1997unsigned ArgSz = Call->Arguments.size();
1998unsigned LiteralIdx = 0;
2000// Memory operand is optional and is literal. 2001case SPIRV::OpCooperativeMatrixLoadKHR:
2002 LiteralIdx = ArgSz > 3 ? 3 : 0;
2004case SPIRV::OpCooperativeMatrixStoreKHR:
2005 LiteralIdx = ArgSz > 4 ? 4 : 0;
2007case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2008 LiteralIdx = ArgSz > 7 ? 7 : 0;
2010case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2011 LiteralIdx = ArgSz > 8 ? 8 : 0;
2013// Cooperative Matrix Operands operand is optional and is literal. 2014case SPIRV::OpCooperativeMatrixMulAddKHR:
2015 LiteralIdx = ArgSz > 3 ? 3 : 0;
2021if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
    auto MIB =
        MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
            .addUse(Call->Arguments[0])  // pointer
            .addUse(Call->Arguments[1])  // rows
            .addUse(Call->Arguments[2])  // columns
            .addImm(CacheLevel)          // cache level
            .addUse(Call->Arguments[4]); // memory layout
      MIB.addUse(Call->Arguments[5]); // stride
    MIB.addImm(MemOp); // memory operand

  if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2045 .
addDef(Call->ReturnRegister)
2051 IsSet ? TypeReg :
Register(0), ImmArgs);
2057// Lookup the instruction opcode in the TableGen records. 2060 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2064case SPIRV::OpSpecConstant: {
2065// Build the SpecID decoration. 2068buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2070// Determine the constant MI. 2071Register ConstRegister = Call->Arguments[1];
2074 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2075 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2076"Argument should be either an int or floating-point constant");
2077// Determine the opcode and built the OpSpec MI. 2079if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2080assert(ConstOperand.
isCImm() &&
"Int constant operand is expected");
2082 ? SPIRV::OpSpecConstantTrue
2083 : SPIRV::OpSpecConstantFalse;
2086 .
addDef(Call->ReturnRegister)
2089if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2090if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2097case SPIRV::OpSpecConstantComposite: {
2099 .
addDef(Call->ReturnRegister)
2101for (
unsigned i = 0; i < Call->Arguments.size(); i++)
2102 MIB.
addUse(Call->Arguments[i]);
2120Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2122// Skip the first arg, it's the destination pointer. OpBuildNDRange takes 2123// three other arguments, so pass zero constant on absence. 2124unsigned NumArgs = Call->Arguments.size();
2126Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2128 NumArgs == 2 ?
Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2133if (SpvTy->
getOpcode() == SPIRV::OpTypePointer) {
    // TODO: Maybe simplify generation of the type of the fields.
    unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2144 GlobalWorkSize =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2153 SpvFieldTy, *ST.getInstrInfo());
2158 LocalWorkSize = Const;
2159if (!GlobalWorkOffset.
isValid())
2160 GlobalWorkOffset = Const;
2168 .
addUse(GlobalWorkOffset);
2170 .
addUse(Call->Arguments[0])
2174// TODO: maybe move to the global register. 2195bool IsSpirvOp = Call->isSpirvOp();
2196bool HasEvents = Call->Builtin->Name.contains(
"events") || IsSpirvOp;
2199// Make vararg instructions before OpEnqueueKernel. 2200// Local sizes arguments: Sizes of block invoke arguments. Clang generates 2201// local size operands as an array, so we need to unpack them. 2203if (Call->Builtin->Name.contains(
"_varargs") || IsSpirvOp) {
    const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2205Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2212assert(LocalSizeTy &&
"Local size type is expected");
2214 cast<ArrayType>(LocalSizeTy)->getNumElements();
2218 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2219for (
unsigned I = 0;
I < LocalSizeNum; ++
I) {
2221MRI->setType(
Reg, LLType);
2234// SPIRV OpEnqueueKernel instruction has 10+ arguments. 2235auto MIB = MIRBuilder.
buildInstr(SPIRV::OpEnqueueKernel)
2236 .
addDef(Call->ReturnRegister)
2239// Copy all arguments before block invoke function pointer. 2240constunsigned BlockFIdx = HasEvents ? 6 : 3;
2241for (
unsigned i = 0; i < BlockFIdx; i++)
2242 MIB.addUse(Call->Arguments[i]);
  // If there are no event arguments in the original call, add dummy ones.
    MIB.addUse(NullPtr); // Dummy wait events.
    MIB.addUse(NullPtr); // Dummy ret event.

  // Invoke: Pointer to invoke function.

  Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2259// Param: Pointer to block literal. 2260 MIB.addUse(BlockLiteralReg);
  // TODO: these numbers should be obtained from block literal structure.
  // Param Size: Size of block literal structure.
  // Param Alignment: Alignment of block literal structure.
  for (
unsigned i = 0; i < LocalSizes.
size(); i++)
2271 MIB.addUse(LocalSizes[i]);
2278// Lookup the instruction opcode in the TableGen records. 2281 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2284case SPIRV::OpRetainEvent:
2285case SPIRV::OpReleaseEvent:
2287case SPIRV::OpCreateUserEvent:
2288case SPIRV::OpGetDefaultQueue:
2290 .
addDef(Call->ReturnRegister)
2292case SPIRV::OpIsValidEvent:
2294 .
addDef(Call->ReturnRegister)
2296 .
addUse(Call->Arguments[0]);
2297case SPIRV::OpSetUserEventStatus:
2299 .
addUse(Call->Arguments[0])
2300 .
addUse(Call->Arguments[1]);
2301case SPIRV::OpCaptureEventProfilingInfo:
2303 .
addUse(Call->Arguments[0])
2304 .
addUse(Call->Arguments[1])
2305 .
addUse(Call->Arguments[2]);
2306case SPIRV::OpBuildNDRange:
2308case SPIRV::OpEnqueueKernel:
2318// Lookup the instruction opcode in the TableGen records. 2321 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2323bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2325if (Call->isSpirvOp())
2332case SPIRV::OpGroupAsyncCopy: {
2334 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2338unsigned NumArgs = Call->Arguments.size();
2339Register EventReg = Call->Arguments[NumArgs - 1];
2341 .
addDef(Call->ReturnRegister)
2344 .
addUse(Call->Arguments[0])
2345 .
addUse(Call->Arguments[1])
2346 .
addUse(Call->Arguments[2])
2347 .
addUse(Call->Arguments.size() > 4
2348 ? Call->Arguments[3]
2351if (NewType !=
nullptr)
2356case SPIRV::OpGroupWaitEvents:
2359 .
addUse(Call->Arguments[0])
2360 .
addUse(Call->Arguments[1]);
2370// Lookup the conversion builtin in the TableGen records. 2372 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2374if (!Builtin && Call->isSpirvOp()) {
2377 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2384 SPIRV::Decoration::SaturatedConversion, {});
2387 SPIRV::Decoration::FPRoundingMode,
2388 {(unsigned)Builtin->RoundingMode});
  std::string NeedExtMsg;              // no errors if empty
  bool IsRightComponentsNumber = true; // check if input/output accepts vectors
  unsigned Opcode = SPIRV::OpNop;
2399 : SPIRV::OpSatConvertSToU;
2402 : SPIRV::OpSConvert;
2404 SPIRV::OpTypeFloat)) {
2409if (!ST->canUseExtension(
2410 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2411 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2412 IsRightComponentsNumber =
2415 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2417bool IsSourceSigned =
2419 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2423 SPIRV::OpTypeFloat)) {
2430if (!ST->canUseExtension(
2431 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2432 NeedExtMsg =
"SPV_INTEL_bfloat16_conversion";
2433 IsRightComponentsNumber =
2436 Opcode = SPIRV::OpConvertFToBF16INTEL;
2439 : SPIRV::OpConvertFToU;
2442 SPIRV::OpTypeFloat)) {
2444 Opcode = SPIRV::OpFConvert;
2448if (!NeedExtMsg.empty()) {
2449 std::string DiagMsg = std::string(Builtin->
Name) +
2450": the builtin requires the following SPIR-V " 2455if (!IsRightComponentsNumber) {
2456 std::string DiagMsg =
2457 std::string(Builtin->
Name) +
2458": result and argument must have the same number of components";
2461assert(Opcode != SPIRV::OpNop &&
2462"Conversion between the types not implemented!");
2465 .
addDef(Call->ReturnRegister)
2467 .
addUse(Call->Arguments[0]);
2474// Lookup the vector load/store builtin in the TableGen records. 2476 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2477 Call->Builtin->Set);
2478// Build extended instruction. 2481 .
addDef(Call->ReturnRegister)
2483 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2485for (
autoArgument : Call->Arguments)
2490// Rounding mode should be passed as a last argument in the MI for builtins 2491// like "vstorea_halfn_r". 2500// Lookup the instruction opcode in the TableGen records. 2503 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
2504bool IsLoad = Opcode == SPIRV::OpLoad;
2505// Build the instruction. 2508 MIB.
addDef(Call->ReturnRegister);
2511// Add a pointer to the value to load/store. 2512 MIB.
addUse(Call->Arguments[0]);
2514// Add a value to store. 2516 MIB.addUse(Call->Arguments[1]);
2517// Add optional memory attributes and an alignment. 2518unsigned NumArgs = Call->Arguments.size();
2519if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2521if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2527// Try to find a builtin function attributes by a demangled function name and 2528// return a tuple <builtin group, op code, ext instruction number>, or a special 2529// tuple value <-1, 0, 0> if the builtin function is not found. 2530// Not all builtin functions are supported, only those with a ready-to-use op 2531// code or instruction number defined in TableGen. 2532// TODO: consider a major rework of mapping demangled calls into a builtin 2533// functions to unify search and decrease number of individual cases. 2534std::tuple<int, unsigned, unsigned>
2536 SPIRV::InstructionSet::InstructionSet Set) {
2539 std::unique_ptr<const IncomingCall> Call =
2542return std::make_tuple(-1, 0, 0);
2544switch (Call->Builtin->Group) {
2545case SPIRV::Relational:
2548case SPIRV::CastToPtr:
2549case SPIRV::ImageMiscQuery:
2550case SPIRV::SpecConstant:
2552case SPIRV::AsyncCopy:
2553case SPIRV::LoadStore:
2554case SPIRV::CoopMatr:
2556 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2557return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2559case SPIRV::Extended:
2560if (
constauto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2561 Call->Builtin->Set))
2562return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2564case SPIRV::VectorLoadStore:
2565if (
constauto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2566 Call->Builtin->Set))
2567return std::make_tuple(SPIRV::Extended, 0, R->Number);
2570if (
constauto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2571return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2573case SPIRV::AtomicFloating:
2574if (
constauto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2575return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2577case SPIRV::IntelSubgroups:
2578if (
constauto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2579return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2581case SPIRV::GroupUniform:
2582if (
constauto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2583return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2585case SPIRV::WriteImage:
2586return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2588return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2589case SPIRV::Construct:
2590return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2592case SPIRV::KernelClock:
2593return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2595return std::make_tuple(-1, 0, 0);
2597return std::make_tuple(-1, 0, 0);
2601 SPIRV::InstructionSet::InstructionSet Set,
2606LLVM_DEBUG(
dbgs() <<
"Lowering builtin call: " << DemangledCall <<
"\n");
2608// Lookup the builtin in the TableGen records. 2610assert(SpvType &&
"Inconsistent return register: expected valid type info");
2611 std::unique_ptr<const IncomingCall> Call =
2619// TODO: check if the provided args meet the builtin requirments. 2620assert(Args.size() >= Call->Builtin->MinNumArgs &&
2621"Too few arguments to generate the builtin");
2622if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2625// Match the builtin with implementation based on the grouping. 2626switch (Call->Builtin->Group) {
2627case SPIRV::Extended:
2629case SPIRV::Relational:
2633case SPIRV::Variable:
2637case SPIRV::AtomicFloating:
2641case SPIRV::CastToPtr:
2647case SPIRV::ICarryBorrow:
2649case SPIRV::GetQuery:
2651case SPIRV::ImageSizeQuery:
2653case SPIRV::ImageMiscQuery:
2655case SPIRV::ReadImage:
2657case SPIRV::WriteImage:
2659case SPIRV::SampleImage:
2663case SPIRV::Construct:
2665case SPIRV::SpecConstant:
2669case SPIRV::AsyncCopy:
2673case SPIRV::VectorLoadStore:
2675case SPIRV::LoadStore:
2677case SPIRV::IntelSubgroups:
2679case SPIRV::GroupUniform:
2681case SPIRV::KernelClock:
2683case SPIRV::CoopMatr:
2690// Parse strings representing OpenCL builtin types. 2692// OpenCL builtin types in demangled call strings have the following format: 2693// e.g. ocl_image2d_ro 2694 [[maybe_unused]]
bool IsOCLBuiltinType = TypeStr.
consume_front(
"ocl_");
2695assert(IsOCLBuiltinType &&
"Invalid OpenCL builtin prefix");
2697// Check if this is pointer to a builtin type and not just pointer 2698// representing a builtin type. In case it is a pointer to builtin type, 2699// this will require additional handling in the method calling 2700// parseBuiltinCallArgumentBaseType(...) as this function only retrieves the 2709// Parse type name in either "typeN" or "type vector[N]" format, where 2710// N is the number of elements of the vector. 2712unsigned VecElts = 0;
2716// Unable to recognize SPIRV type name. 2719// Handle "typeN*" or "type vector[N]*". 2723 TypeStr = TypeStr.
substr(0, TypeStr.
find(
']'));
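// For example (illustrative): an argument type spelled either "float4" or
// "float vector[4]" (optionally followed by '*') is reduced here to the base
// name "float", with the vector element count parsed separately.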
2735auto Pos1 = DemangledCall.
find(
'(');
2738auto Pos2 = DemangledCall.
find(
')');
2741 DemangledCall.
slice(Pos1 + 1, Pos2)
2742 .
split(BuiltinArgsTypeStrs,
',', -1,
false);
2750if (ArgIdx >= BuiltinArgsTypeStrs.
size())
2752StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2761#define GET_BuiltinTypes_DECL 2762#define GET_BuiltinTypes_IMPL 2769#define GET_OpenCLTypes_DECL 2770#define GET_OpenCLTypes_IMPL 2772#include "SPIRVGenTables.inc" 2775//===----------------------------------------------------------------------===// 2776// Misc functions for parsing builtin types. 2777//===----------------------------------------------------------------------===// 2780if (
Name.starts_with(
"void"))
else if (
Name.starts_with(
"int") ||
Name.starts_with(
"uint"))
else if (
Name.starts_with(
"float"))
else if (
Name.starts_with(
"half"))
2791//===----------------------------------------------------------------------===// 2792// Implementation functions for builtin types. 2793//===----------------------------------------------------------------------===// 2799unsigned Opcode = TypeRecord->
Opcode;
2800// Create or get an existing type from GlobalRegistry. 2806// Create or get an existing type from GlobalRegistry. 2814"Invalid number of parameters for SPIR-V pipe builtin!");
2815// Create or get an existing type from GlobalRegistry. 2817 SPIRV::AccessQualifier::AccessQualifier(
2825"Invalid number of parameters for SPIR-V coop matrices builtin!");
2827"SPIR-V coop matrices builtin type must have a type parameter!");
2830// Create or get an existing type from GlobalRegistry. 2832 MIRBuilder, ExtensionType, ElemType, ExtensionType->
getIntParameter(0),
2839const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2842"SPIR-V image builtin type must have sampled type parameter!");
2847"Invalid number of parameters for SPIR-V image builtin!");
2849 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2850 SPIRV::AccessQualifier::None;
2852 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2853 ? SPIRV::AccessQualifier::WriteOnly
2854 : SPIRV::AccessQualifier::AccessQualifier(
2858// Create or get an existing type from GlobalRegistry. 2860 MIRBuilder, SampledType,
2872 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
  // Create or get an existing type from GlobalRegistry.

// Pointers-to-opaque-structs representing OpenCL types are first translated
// to equivalent SPIR-V types. OpenCL builtin type names should have the
// following format: e.g. %opencl.event_t
      SPIRV::lookupOpenCLType(NameWithParameters);
                                       NameWithParameters);

  // Continue with the SPIR-V builtin type...

  // Names of the opaque structs representing SPIR-V builtins without
  // parameters should have the following format: e.g. %spirv.Event
         "Unknown builtin opaque type!");

  // Parameterized SPIR-V builtin names follow this format:
  // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
  if (!NameWithParameters.contains('_'))

  unsigned BaseNameLength = NameWithParameters.find('_') - 1;
  SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");

  bool HasTypeParameter = !isDigit(Parameters[0][0]);
  if (HasTypeParameter)
  for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
    unsigned IntParameter = 0;
    bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
           "Invalid format of SPIR-V builtin parameter literal!");

      NameWithParameters.substr(0, BaseNameLength),
      TypeParameters, IntParameters);
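// Worked example (illustrative only, mirroring the parsing above): for a
// record name such as "spirv.Image._void_1_0_0_0_0_0_0" the base name is
// "spirv.Image", the leading non-numeric fragment "void" is the type
// parameter, and the remaining literals {1, 0, 0, 0, 0, 0, 0} become the
// integer parameters of the resulting target extension type. The helper name
// sketchSplitParameterizedName is hypothetical.
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"

static void sketchSplitParameterizedName() {
  llvm::StringRef NameWithParameters = "spirv.Image._void_1_0_0_0_0_0_0";
  // Base name is everything before the "._" separator: "spirv.Image".
  unsigned BaseNameLength = NameWithParameters.find('_') - 1;
  llvm::SmallVector<llvm::StringRef, 8> Parameters;
  llvm::SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters,
                    "_");
  // Parameters == {"void", "1", "0", "0", "0", "0", "0", "0"}; the first
  // fragment is not a digit, so it is treated as the type parameter.
  bool HasTypeParameter = !llvm::isDigit(Parameters[0][0]);
  (void)HasTypeParameter;
  (void)BaseNameLength;
}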
    SPIRV::AccessQualifier::AccessQualifier AccessQual,

// In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
// target(...) target extension types or pointers-to-opaque-structs. The
// approach relying on structs is deprecated and works only in the non-opaque
// pointer mode (-opaque-pointers=0).
// In order to maintain compatibility with LLVM IR generated by older versions
// of Clang and the LLVM/SPIR-V Translator, pointers-to-opaque-structs are
// "translated" to target extension types. This translation is temporary and
// will be removed in a future release of LLVM.

  // Lookup the demangled builtin type in the TableGen records.

  // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
  // use the implementation details from TableGen records or TargetExtType
  // parameters to either create a new OpType<...> machine instruction or get an
  // existing equivalent SPIRVType from GlobalRegistry.
  switch (TypeRecord->Opcode) {
  case SPIRV::OpTypeImage:
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeDeviceEvent:
  case SPIRV::OpTypeSampler:
  case SPIRV::OpTypeSampledImage:
  case SPIRV::OpTypeCooperativeMatrixKHR:
  // Emit OpName instruction if a new OpType<...> instruction was added
  // (equivalent type was not found in GlobalRegistry).
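// Rough illustration (an assumption about usage, not this file's code path):
// the deprecated pointer-to-opaque-struct spelling (e.g. "%opencl.event_t*" or
// "%spirv.Event*") and the target extension type spelling (e.g.
// target("spirv.Event")) denote the same builtin; the translation described
// above produces the latter form, roughly as below. The helper name
// sketchEventAsTargetExtType is hypothetical.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

static llvm::TargetExtType *
sketchEventAsTargetExtType(llvm::LLVMContext &Ctx) {
  // Equivalent of the legacy opaque struct %spirv.Event (no type or integer
  // parameters).
  return llvm::TargetExtType::get(Ctx, "spirv.Event");
}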