[llvm] cbuffer final (PR #169078)
via llvm-commits
llvm-commits@lists.llvm.org
Fri Nov 21 10:27:45 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-spir-v
Author: Steven Perron (s-perron)
<details>
<summary>Changes</summary>
- **[SPIRV] Improve Logical SPIR-V Pointer Access and GEP Legalization**
- **[SPIRV] Support Peeled Array Layouts for HLSL CBuffers**
---
Patch is 47.01 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/169078.diff
15 Files Affected:
- (modified) llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp (+2)
- (modified) llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp (+8-3)
- (modified) llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp (+57-16)
- (modified) llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp (+23)
- (modified) llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h (+2)
- (modified) llvm/lib/Target/SPIRV/SPIRVIRMapping.h (+5)
- (modified) llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp (+6)
- (modified) llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp (+12-22)
- (modified) llvm/lib/Target/SPIRV/SPIRVUtils.cpp (+65)
- (modified) llvm/lib/Target/SPIRV/SPIRVUtils.h (+15)
- (added) llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll (+71)
- (added) llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array-minimal.ll (+65)
- (added) llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-peeled-array.ll (+64)
- (added) llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-simple.ll (+68)
- (added) llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-struct.ll (+146)
``````````diff
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index b2cbdb2ad7375..709f49b0fecc1 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -3373,6 +3373,8 @@ SPIRVType *lowerBuiltinType(const Type *OpaqueType,
TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
} else if (Name == "spirv.VulkanBuffer") {
TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
+ } else if (Name == "spirv.Padding") {
+ TargetType = GR->getOrCreatePaddingType(MIRBuilder);
} else if (Name == "spirv.Layout") {
TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
} else {
diff --git a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp
index 329774df554f4..227d8716d974a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVCBufferAccess.cpp
@@ -79,15 +79,20 @@ static bool replaceCBufferAccesses(Module &M) {
// The handle definition should dominate all uses of the cbuffer members.
// We'll insert our getpointer calls right after it.
IRBuilder<> Builder(HandleDef->getNextNode());
+ auto *HandleTy = cast<TargetExtType>(Mapping.Handle->getValueType());
+ auto *LayoutTy = cast<StructType>(HandleTy->getTypeParameter(0));
+ const StructLayout *SL = M.getDataLayout().getStructLayout(LayoutTy);
- for (uint32_t Index = 0; Index < Mapping.Members.size(); ++Index) {
- GlobalVariable *MemberGV = Mapping.Members[Index].GV;
+ for (const hlsl::CBufferMember &Member : Mapping.Members) {
+ GlobalVariable *MemberGV = Member.GV;
if (MemberGV->use_empty()) {
continue;
}
+ uint32_t IndexInStruct = SL->getElementContainingOffset(Member.Offset);
+
// Create the getpointer intrinsic call.
- Value *IndexVal = Builder.getInt32(Index);
+ Value *IndexVal = Builder.getInt32(IndexInStruct);
Type *PtrType = MemberGV->getType();
Value *GetPointerCall = Builder.CreateIntrinsic(
PtrType, Intrinsic::spv_resource_getpointer, {HandleDef, IndexVal});
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 8e14fb03127fc..fc76bafe7e329 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -841,6 +841,7 @@ Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
uint32_t Index = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
Ty = cast<StructType>(Ty)->getElementType(Index);
}
+ Ty = reconstitutePeeledArrayType(Ty);
} else {
llvm_unreachable("Unknown handle type for spv_resource_getpointer.");
}
@@ -1569,16 +1570,60 @@ Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
return BrI;
}
-Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
- if (I.getSourceElementType() == IntegerType::getInt8Ty(CurrF->getContext()) &&
- TM->getSubtargetImpl()->isLogicalSPIRV()) {
- Instruction *Result = buildLogicalAccessChainFromGEP(I);
- if (Result)
- return Result;
+static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
+ if (GEP->getNumIndices() == 0)
+ return false;
+ if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
+ return CI->getZExtValue() == 0;
}
+ return false;
+}
+Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
IRBuilder<> B(I.getParent());
B.SetInsertPoint(&I);
+
+ if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(&I)) {
+ // Logical SPIR-V cannot use the OpPtrAccessChain instruction. If the first
+ // index of the GEP is not 0, then we need to try to adjust it.
+ //
+ // If the GEP is doing byte addressing, try to rebuild the full access chain
+ // from the type of the pointer.
+ if (I.getSourceElementType() ==
+ IntegerType::getInt8Ty(CurrF->getContext())) {
+ Instruction *Result = buildLogicalAccessChainFromGEP(I);
+ if (Result) {
+ return Result;
+ }
+ }
+
+ // Look for the array-to-pointer decay. If this is the pattern
+ // we can adjust the types, and prepend a 0 to the indices.
+ Value *PtrOp = I.getPointerOperand();
+ Type *SrcElemTy = I.getSourceElementType();
+ Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
+
+ if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
+ if (ArrTy->getElementType() == SrcElemTy) {
+ SmallVector<Value *> NewIndices;
+ Type *FirstIdxType = I.getOperand(1)->getType();
+ NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
+ for (Value *Idx : I.indices())
+ NewIndices.push_back(Idx);
+
+ SmallVector<Type *, 2> Types = {I.getType(), I.getPointerOperandType()};
+ SmallVector<Value *, 4> Args;
+ Args.push_back(B.getInt1(I.isInBounds()));
+ Args.push_back(I.getPointerOperand());
+ Args.append(NewIndices.begin(), NewIndices.end());
+
+ auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
+ replaceAllUsesWithAndErase(B, &I, NewI);
+ return NewI;
+ }
+ }
+ }
+
SmallVector<Type *, 2> Types = {I.getType(), I.getOperand(0)->getType()};
SmallVector<Value *, 4> Args;
Args.push_back(B.getInt1(I.isInBounds()));
@@ -1772,16 +1817,12 @@ void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
Value *Pointer = GEPI->getPointerOperand();
Type *OpTy = nullptr;
- // Knowing the accessed type is mandatory for logical SPIR-V. Sadly,
- // the GEP source element type should not be used for this purpose, and
- // the alternative type-scavenging method is not working.
- // Physical SPIR-V can work around this, but not logical, hence still
- // try to rely on the broken type scavenging for logical.
- bool IsRewrittenGEP =
- GEPI->getSourceElementType() == IntegerType::getInt8Ty(I->getContext());
- if (IsRewrittenGEP && TM->getSubtargetImpl()->isLogicalSPIRV()) {
- Value *Src = getPointerRoot(Pointer);
- OpTy = GR->findDeducedElementType(Src);
+ // Logical SPIR-V is not allowed to use Op*PtrAccessChain instructions. If
+ // the first index is 0, then we can trivially lower to OpAccessChain. If
+ // not we need to try to rewrite the GEP. We avoid adding a pointer cast at
+ // this time, and will rewrite the GEP when visiting it.
+ if (TM->getSubtargetImpl()->isLogicalSPIRV() && !isFirstIndexZero(GEPI)) {
+ return;
}
// In all cases, fall back to the GEP type if type scavenging failed.
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
index 76fd834fd7219..b67b16c87e862 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -889,6 +889,17 @@ SPIRVType *SPIRVGlobalRegistry::getOpTypeStruct(
const StructType *Ty, MachineIRBuilder &MIRBuilder,
SPIRV::AccessQualifier::AccessQualifier AccQual,
StructOffsetDecorator Decorator, bool EmitIR) {
+ Type *OriginalElementType = nullptr;
+ uint64_t TotalSize = 0;
+ if (matchPeeledArrayPattern(Ty, OriginalElementType, TotalSize)) {
+ SPIRVType *ElementSPIRVType = findSPIRVType(
+ OriginalElementType, MIRBuilder, AccQual,
+ /* ExplicitLayoutRequired= */ Decorator != nullptr, EmitIR);
+ return getOpTypeArray(TotalSize, ElementSPIRVType, MIRBuilder,
+ /*ExplicitLayoutRequired=*/Decorator != nullptr,
+ EmitIR);
+ }
+
const SPIRVSubtarget &ST =
cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
SmallVector<Register, 4> FieldTypes;
@@ -1405,6 +1416,18 @@ SPIRVType *SPIRVGlobalRegistry::getOrCreateVulkanBufferType(
return R;
}
+SPIRVType *
+SPIRVGlobalRegistry::getOrCreatePaddingType(MachineIRBuilder &MIRBuilder) {
+ auto Key = SPIRV::irhandle_padding();
+ if (const MachineInstr *MI = findMI(Key, &MIRBuilder.getMF()))
+ return MI;
+ auto *T = Type::getInt8Ty(MIRBuilder.getContext());
+ SPIRVType *R = getOrCreateSPIRVIntegerType(8, MIRBuilder);
+ finishCreatingSPIRVType(T, R);
+ add(Key, R);
+ return R;
+}
+
SPIRVType *SPIRVGlobalRegistry::getOrCreateLayoutType(
MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr) {
auto Key = SPIRV::handle(T);
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
index c230e62e795e8..f6098634a81dc 100644
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.h
@@ -611,6 +611,8 @@ class SPIRVGlobalRegistry : public SPIRVIRMapping {
SPIRV::StorageClass::StorageClass SC,
bool IsWritable, bool EmitIr = false);
+ SPIRVType *getOrCreatePaddingType(MachineIRBuilder &MIRBuilder);
+
SPIRVType *getOrCreateLayoutType(MachineIRBuilder &MIRBuilder,
const TargetExtType *T, bool EmitIr = false);
diff --git a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
index c99d603d340ea..47c7676d5631c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
+++ b/llvm/lib/Target/SPIRV/SPIRVIRMapping.h
@@ -64,6 +64,7 @@ enum SpecialTypeKind {
STK_Value,
STK_MachineInstr,
STK_VkBuffer,
+ STK_Padding,
STK_ExplictLayoutType,
STK_Last = -1
};
@@ -149,6 +150,10 @@ inline IRHandle irhandle_vkbuffer(const Type *ElementType,
SpecialTypeKind::STK_VkBuffer);
}
+inline IRHandle irhandle_padding() {
+ return std::make_tuple(nullptr, 0, SpecialTypeKind::STK_Padding);
+}
+
inline IRHandle irhandle_explict_layout_type(const Type *Ty) {
const Type *WrpTy = unifyPtrType(Ty);
return irhandle_ptr(WrpTy, Ty->getTypeID(), STK_ExplictLayoutType);
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
index d3fc08eb56cb3..69606c10fb224 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp
@@ -467,6 +467,7 @@ static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef,
switch (Opcode) {
case TargetOpcode::G_CONSTANT:
case TargetOpcode::G_FCONSTANT:
+ case TargetOpcode::G_IMPLICIT_DEF:
return true;
case TargetOpcode::G_INTRINSIC:
case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
@@ -3088,6 +3089,11 @@ bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
.addUse(GR.getSPIRVTypeID(ResType))
// Object to get a pointer to.
.addUse(I.getOperand(3).getReg());
+ assert(Opcode == SPIRV::OpPtrAccessChain ||
+ Opcode == SPIRV::OpInBoundsPtrAccessChain ||
+ (getImm(I.getOperand(4), MRI) && foldImm(I.getOperand(4), MRI) == 0) &&
+ "Cannot translate GEP to OpAccessChain. First index must be 0.");
+
// Adding indices.
const unsigned StartingIndex =
(Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
index 4ce871b6f5e5d..81c7596530ee2 100644
--- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp
@@ -104,9 +104,13 @@ class SPIRVLegalizePointerCast : public FunctionPass {
Value *loadFirstValueFromAggregate(IRBuilder<> &B, Type *ElementType,
Value *Source, LoadInst *BadLoad) {
SmallVector<Type *, 2> Types = {BadLoad->getPointerOperandType(),
- BadLoad->getPointerOperandType()};
- SmallVector<Value *, 3> Args{/* isInBounds= */ B.getInt1(false), Source,
- B.getInt32(0), B.getInt32(0)};
+ Source->getType()};
+ SmallVector<Value *, 8> Args{/* isInBounds= */ B.getInt1(false), Source};
+
+ Type *AggregateType = GR->findDeducedElementType(Source);
+ assert(AggregateType && "Could not deduce aggregate type");
+ buildGEPIndexChain(B, ElementType, AggregateType, Args);
+
auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
GR->buildAssignPtr(B, ElementType, GEP);
@@ -201,34 +205,20 @@ class SPIRVLegalizePointerCast : public FunctionPass {
auto *SAT = dyn_cast<ArrayType>(FromTy);
auto *SVT = dyn_cast<FixedVectorType>(FromTy);
- auto *SST = dyn_cast<StructType>(FromTy);
auto *DVT = dyn_cast<FixedVectorType>(ToTy);
B.SetInsertPoint(LI);
- // Destination is the element type of Source, and source is an array ->
- // Loading 1st element.
+ // Destination is the element type of some member of FromTy. For example,
+ // loading the 1st element of an array:
// - float a = array[0];
- if (SAT && SAT->getElementType() == ToTy)
- Output = loadFirstValueFromAggregate(B, SAT->getElementType(),
- OriginalOperand, LI);
- // Destination is the element type of Source, and source is a vector ->
- // Vector to scalar.
- // - float a = vector.x;
- else if (!DVT && SVT && SVT->getElementType() == ToTy) {
- Output = loadFirstValueFromAggregate(B, SVT->getElementType(),
- OriginalOperand, LI);
- }
+ if (isTypeFirstElementAggregate(ToTy, FromTy))
+ Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI);
// Destination is a smaller vector than source or different vector type.
// - float3 v3 = vector4;
// - float4 v2 = int4;
else if (SVT && DVT)
Output = loadVectorFromVector(B, SVT, DVT, OriginalOperand);
- // Destination is the scalar type stored at the start of an aggregate.
- // - struct S { float m };
- // - float v = s.m;
- else if (SST && SST->getTypeAtIndex(0u) == ToTy)
- Output = loadFirstValueFromAggregate(B, ToTy, OriginalOperand, LI);
else if (SAT && DVT && SAT->getElementType() == DVT->getElementType())
Output = loadVectorFromArray(B, DVT, OriginalOperand);
else
@@ -334,7 +324,7 @@ class SPIRVLegalizePointerCast : public FunctionPass {
Value *storeToFirstValueAggregate(IRBuilder<> &B, Value *Src, Value *Dst,
Type *DstPointeeType, Align Alignment) {
SmallVector<Type *, 2> Types = {Dst->getType(), Dst->getType()};
- SmallVector<Value *, 3> Args{/* isInBounds= */ B.getInt1(true), Dst};
+ SmallVector<Value *, 8> Args{/* isInBounds= */ B.getInt1(true), Dst};
buildGEPIndexChain(B, Src->getType(), DstPointeeType, Args);
auto *GEP = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
GR->buildAssignPtr(B, Src->getType(), GEP);
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
index 8f2fc01da476f..3a5069928848b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.cpp
@@ -1042,6 +1042,71 @@ getFirstValidInstructionInsertPoint(MachineBasicBlock &BB) {
: VarPos;
}
+bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
+ uint64_t &TotalSize) {
+ // An array of N padded structs is represented as {[N-1 x <{T, pad}>], T}.
+ if (!Ty->isStructTy() || Ty->getStructNumElements() != 2)
+ return false;
+
+ Type *FirstElement = Ty->getStructElementType(0);
+ Type *SecondElement = Ty->getStructElementType(1);
+
+ if (!FirstElement->isArrayTy())
+ return false;
+
+ Type *ArrayElementType = FirstElement->getArrayElementType();
+ if (!ArrayElementType->isStructTy() ||
+ ArrayElementType->getStructNumElements() != 2)
+ return false;
+
+ Type *T_in_struct = ArrayElementType->getStructElementType(0);
+ if (T_in_struct != SecondElement)
+ return false;
+
+ const uint64_t ArraySize = FirstElement->getArrayNumElements();
+ TotalSize = ArraySize + 1;
+ OriginalElementType = ArrayElementType;
+ return true;
+}
+
+Type *reconstitutePeeledArrayType(Type *Ty) {
+ if (!Ty->isStructTy())
+ return Ty;
+
+ auto *STy = cast<StructType>(Ty);
+ Type *OriginalElementType = nullptr;
+ uint64_t TotalSize = 0;
+ if (matchPeeledArrayPattern(STy, OriginalElementType, TotalSize)) {
+ Type *ResultTy = ArrayType::get(
+ reconstitutePeeledArrayType(OriginalElementType), TotalSize);
+ return ResultTy;
+ }
+
+ SmallVector<Type *, 4> NewElementTypes;
+ bool Changed = false;
+ for (Type *ElementTy : STy->elements()) {
+ Type *NewElementTy = reconstitutePeeledArrayType(ElementTy);
+ if (NewElementTy != ElementTy)
+ Changed = true;
+ NewElementTypes.push_back(NewElementTy);
+ }
+
+ if (Changed) {
+ Type *ResultTy;
+ if (STy->isLiteral())
+ ResultTy =
+ StructType::get(STy->getContext(), NewElementTypes, STy->isPacked());
+ else {
+ auto *NewTy = StructType::create(STy->getContext(), STy->getName());
+ NewTy->setBody(NewElementTypes, STy->isPacked());
+ ResultTy = NewTy;
+ }
+ return ResultTy;
+ }
+
+ return Ty;
+}
+
std::optional<SPIRV::LinkageType::LinkageType>
getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV) {
if (GV.hasLocalLinkage() || GV.hasHiddenVisibility())
diff --git a/llvm/lib/Target/SPIRV/SPIRVUtils.h b/llvm/lib/Target/SPIRV/SPIRVUtils.h
index 99d9d403ea70c..45e211a1e5d2a 100644
--- a/llvm/lib/Target/SPIRV/SPIRVUtils.h
+++ b/llvm/lib/Target/SPIRV/SPIRVUtils.h
@@ -321,6 +321,21 @@ Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx);
// Returns true if the function was changed.
bool sortBlocks(Function &F);
+// Check for peeled array structs and recursively reconstitute them. In HLSL
+// CBuffers, arrays may have padding between elements, but not after the last
+// element. An array [N x T] is therefore represented in LLVM IR as
+// {[N-1 x {T, spirv.Padding}], T}. The function matchPeeledArrayPattern
+// recognizes this pattern, retrieving the element type {T, spirv.Padding}
+// and the size N.
+bool matchPeeledArrayPattern(const StructType *Ty, Type *&OriginalElementType,
+ uint64_t &TotalSize);
+
+// This function turns the type {[N-1 x {T, spirv.Padding}], T} back into
+// [N x {T, spirv.Padding}] so that it can be translated into SPIR-V. The
+// offset decorations are chosen so that there is no padding after the array
+// when relevant.
+Type *reconstitutePeeledArrayType(Type *Ty);
+
inline bool hasInitializer(const GlobalVariable *GV) {
return GV->hasInitializer() && !isa<UndefValue>(GV->getInitializer());
}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll
new file mode 100644
index 0000000000000..15b4320d2683d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer-array.ll
@@ -0,0 +1,71 @@
+; RUN: llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[FLOAT:[0-9]+]] = OpTypeFloat 32
+; CHECK-DAG: %[[VEC4:[0-9]+]] = OpTypeVector %[[FLOAT]] 4
+; CHECK-DAG: %[[PTR_VEC4:[0-9]+]] = OpTypePointer Uniform %[[VEC4]]
+; CHECK-DAG: %[[INT:[0-9]+]] = OpTypeInt 32 0
+; CHECK-DAG: %[[PTR_INT:[0-9]+]] = OpTypePointer Uniform %[[INT]]
+; CHECK-DAG: %[[INT64:[0-9]+]] = OpTypeInt 64 0
+; CHECK-DAG: %[[CONST_4:[0-9]+]] = OpConstant %[[INT]] 4{{$}}
+
+; CHECK: %[[ARRAY:[0-9]+]] = OpTypeArray %[[VEC4]] %[[CONST_4]]
+; CHECK-DAG: %[[PTR_ARRAY:[0-9]+]] = OpTypePointer Uniform %[[ARRAY]]
+
+; CHECK: %[[STRUCT_INNER:[0-9]+]] = OpTypeStruct %[[ARRAY]] %[[INT]]
+; CHECK: %[[STRUCT_CBUFFER:[0-9]+]] = OpTypeStruct %[[STRUCT_INNER]]
+; CHECK: %[[PTR_CBUFFER:[0-9]+]] = OpTypePointer Uniform %[[STRUCT_CBUFFER]]
+
+; CHECK-DAG: %[[ZERO:[0-9]+]] = OpConstant %[[INT]] 0{{$}}
+; CHECK-DAG: %[[ONE:[0-9]+]] = OpConstant %[[INT]] 1{{$}}
+
+; CHECK: %[[CBUFFER:[0-9]+]] = OpVariable %[[PTR_CBUFFER]] Uniform
+
+%__cblayout_MyCBuffer = type <{ [4 x <4 x float>], i32 }>
+
@MyCBuffer.cb = local_unnamed_addr global target("spirv.VulkanBuffer", %__cblayout_MyCBuffer, 2, 0) poison
@co...
[truncated]
``````````
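A short illustration of the first change (not taken from the patch; the function and types below are hypothetical): logical SPIR-V has no OpPtrAccessChain, so a GEP whose first index is non-zero cannot be lowered directly. The new visitGetElementPtrInst handling targets the common array-to-pointer-decay case:

```llvm
; A GEP with a non-zero first index. If type deduction determines that %buf
; actually points to an array of float (say [4 x float]), the GEP can be
; rewritten instead of requiring OpPtrAccessChain.
define float @load_elem(ptr %buf, i32 %i) {
entry:
  %p = getelementptr inbounds float, ptr %buf, i32 %i
  %v = load float, ptr %p
  ret float %v
}
```

The pass keeps the deduced array type and prepends a zero index, conceptually `getelementptr inbounds [4 x float], ptr %buf, i32 0, i32 %i`, which the instruction selector can then emit as OpAccessChain or OpInBoundsAccessChain. The assert added to selectGEP checks exactly this invariant: anything that is not a Ptr variant must start with a 0 index.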
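And a sketch of the peeled-array layout the second change handles (illustrative only; `[12 x i8]` stands in for the spirv.Padding type, since matchPeeledArrayPattern only checks the shape of the struct, not the padding type itself). For a cbuffer member `float arr[4]`, every element is padded to a 16-byte stride except the last one, so the frontend peels the final element out of the array:

```llvm
; One padded element, 16 bytes in total.
%padded.float = type <{ float, [12 x i8] }>

; Peeled form of "float arr[4]": three padded elements plus the bare final
; element, so no padding is implied after the array.
%arr.peeled = type { [3 x %padded.float], float }
```

reconstitutePeeledArrayType() folds %arr.peeled back into `[4 x %padded.float]`, which getOpTypeStruct emits as an OpTypeArray whose stride covers the padding while no padding is required after the last element. Relatedly, SPIRVCBufferAccess now derives each member's struct index from its byte offset via getElementContainingOffset rather than from its position in the member list, which keeps the getpointer indices correct when the layout struct contains explicit padding fields.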
</details>
https://github.com/llvm/llvm-project/pull/169078
More information about the llvm-commits mailing list