[llvm] 0b0224b - [SPIRV] support __spirv_Load/Store builtin functions
Ilia Diachkov via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 25 13:38:55 PST 2022
Author: Ilia Diachkov
Date: 2022-12-26T01:25:20+03:00
New Revision: 0b0224bc88af60c91fe3e4721013b99269efa5b2
URL: https://github.com/llvm/llvm-project/commit/0b0224bc88af60c91fe3e4721013b99269efa5b2
DIFF: https://github.com/llvm/llvm-project/commit/0b0224bc88af60c91fe3e4721013b99269efa5b2.diff
LOG: [SPIRV] support __spirv_Load/Store builtin functions
The patch adds support for the builtin functions __spirv_Load and
__spirv_Store. One test is added to demonstrate the improvement.
Differential Revision: https://reviews.llvm.org/D140490
Added:
llvm/test/CodeGen/SPIRV/spirv-load-store.ll
Modified:
llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
llvm/lib/Target/SPIRV/SPIRVBuiltins.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index 89c25e750a52a..2f44c999e5a22 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1450,6 +1450,19 @@ static MachineInstr *getBlockStructInstr(Register ParamReg,
return ValueMI;
}
+// Return the integer constant wrapped by the spv_track_constant intrinsic
+// that defines register \p Reg. Asserts that Reg's unique definition is an
+// spv_track_constant whose tracked operand (operand 2) is itself defined by a
+// G_CONSTANT; the constant's value is returned zero-extended to unsigned.
+// TODO: maybe unify with prelegalizer pass.
+static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
+ MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
+ assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
+ DefMI->getOperand(2).isReg());
+ // Follow the tracked register to the underlying G_CONSTANT.
+ MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
+ assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
+ DefMI2->getOperand(1).isCImm());
+ return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
+}
+
// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static const Type *getMachineInstrType(MachineInstr *MI) {
@@ -1787,6 +1800,35 @@ static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
return true;
}
+// Lower a __spirv_Load / __spirv_Store builtin call into an OpLoad or
+// OpStore instruction. Expected call argument layout:
+//   load:  ptr [, memory-operand-mask [, alignment]]
+//   store: ptr, value [, memory-operand-mask [, alignment]]
+// The trailing optional arguments must be integer constants tracked via
+// spv_track_constant (see getConstFromIntrinsic). Always returns true.
+static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
+ MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ // Lookup the instruction opcode in the TableGen records.
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ unsigned Opcode =
+ SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
+ bool IsLoad = Opcode == SPIRV::OpLoad;
+ // Build the instruction. Only OpLoad produces a result, so only it gets a
+ // result register and a result-type operand.
+ auto MIB = MIRBuilder.buildInstr(Opcode);
+ if (IsLoad) {
+ MIB.addDef(Call->ReturnRegister);
+ MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
+ }
+ // Add a pointer to the value to load/store.
+ MIB.addUse(Call->Arguments[0]);
+ // Add a value to store.
+ if (!IsLoad)
+ MIB.addUse(Call->Arguments[1]);
+ // Add optional memory attributes and an alignment. The argument index of
+ // each optional immediate shifts by one for stores, which carry the extra
+ // value operand before them.
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+ unsigned NumArgs = Call->Arguments.size();
+ if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
+ MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
+ if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
+ MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
+ return true;
+}
+
/// Lowers a builtin function call using the provided \p DemangledCall skeleton
/// and external instruction \p Set.
namespace SPIRV {
@@ -1864,6 +1906,8 @@ std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
case SPIRV::VectorLoadStore:
return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
+ case SPIRV::LoadStore:
+ return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
}
return false;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index 3c430709215de..635c6451ea04a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -53,6 +53,7 @@ def SpecConstant : BuiltinGroup;
def Enqueue : BuiltinGroup;
def AsyncCopy : BuiltinGroup;
def VectorLoadStore : BuiltinGroup;
+def LoadStore : BuiltinGroup;
//===----------------------------------------------------------------------===//
// Class defining a demangled builtin record. The information in the record
@@ -552,7 +553,7 @@ defm : DemangledNativeBuiltin<"ndrange_1D", OpenCL_std, Enqueue, 1, 3, OpBuildND
defm : DemangledNativeBuiltin<"ndrange_2D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
defm : DemangledNativeBuiltin<"ndrange_3D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
-// Spec constant builtin record:
+// Spec constant builtin records:
defm : DemangledNativeBuiltin<"__spirv_SpecConstant", OpenCL_std, SpecConstant, 2, 2, OpSpecConstant>;
defm : DemangledNativeBuiltin<"__spirv_SpecConstantComposite", OpenCL_std, SpecConstant, 1, 0, OpSpecConstantComposite>;
@@ -560,6 +561,10 @@ defm : DemangledNativeBuiltin<"__spirv_SpecConstantComposite", OpenCL_std, SpecC
defm : DemangledNativeBuiltin<"async_work_group_copy", OpenCL_std, AsyncCopy, 4, 4, OpGroupAsyncCopy>;
defm : DemangledNativeBuiltin<"wait_group_events", OpenCL_std, AsyncCopy, 2, 2, OpGroupWaitEvents>;
+// Load and store builtin records:
+defm : DemangledNativeBuiltin<"__spirv_Load", OpenCL_std, LoadStore, 1, 3, OpLoad>;
+defm : DemangledNativeBuiltin<"__spirv_Store", OpenCL_std, LoadStore, 2, 4, OpStore>;
+
//===----------------------------------------------------------------------===//
// Class defining a work/sub group builtin that should be translated into a
// SPIR-V instruction using the defined properties.
diff --git a/llvm/test/CodeGen/SPIRV/spirv-load-store.ll b/llvm/test/CodeGen/SPIRV/spirv-load-store.ll
new file mode 100644
index 0000000000000..a82bf0ab2e01f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/spirv-load-store.ll
@@ -0,0 +1,16 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+;; Translate SPIR-V friendly OpLoad and OpStore calls
+
+; CHECK: %[[#CONST:]] = OpConstant %[[#]] 42
+; CHECK: OpStore %[[#PTR:]] %[[#CONST]] Volatile|Aligned 4
+; CHECK: %[[#]] = OpLoad %[[#]] %[[#PTR]]
+
+define weak_odr dso_local spir_kernel void @foo(i32 addrspace(1)* %var) {
+entry:
+ tail call spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)* %var, i32 42, i32 3, i32 4)
+ %value = tail call spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)* %var)
+ ret void
+}
+
+declare dso_local spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)*) local_unnamed_addr
+declare dso_local spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)*, i32, i32, i32) local_unnamed_addr
More information about the llvm-commits
mailing list