[llvm] Revert "[AMDGPU] Extended vector promotion to aggregate types." (PR #144366)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 16 08:01:15 PDT 2025
https://github.com/zGoldthorpe created https://github.com/llvm/llvm-project/pull/144366
Reverts llvm/llvm-project#143784
The patch fails some internal tests. I will investigate more thoroughly before attempting to remerge.
@shiltian (since I still can't request reviews yet)
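As background for reviewers, #143784 extended AMDGPUPromoteAlloca so that homogeneous aggregate allocas (nested arrays and structs whose leaves all share one element type) could be promoted to vectors. The deleted test file below covers cases such as the following, reproduced from test_s4i8: with #143784 applied, the CHECK lines show the {i8, i8, i8, i8} alloca rewritten into <4 x i8> operations, whereas after this revert struct allocas are no longer promoted.

define i8 @test_s4i8(i32 %bits, i64 %idx) {
  %stack = alloca {i8, i8, i8, i8}, align 4, addrspace(5)
  store i32 %bits, ptr addrspace(5) %stack
  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
  %val = load i8, ptr addrspace(5) %ptr, align 1
  ret i8 %val
}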
From 43ade5f69c4bc058dd32a55886ac87e1298ca253 Mon Sep 17 00:00:00 2001
From: zGoldthorpe <Zach.Goldthorpe at amd.com>
Date: Mon, 16 Jun 2025 08:52:04 -0600
Subject: [PATCH] Revert "[AMDGPU] Extended vector promotion to aggregate
types. (#143784)"
This reverts commit 79e06bf1ae9961c5045134288fd8acc9173f6be2.
---
.../lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp | 106 +++----
.../CodeGen/AMDGPU/promote-alloca-structs.ll | 286 ------------------
2 files changed, 41 insertions(+), 351 deletions(-)
delete mode 100644 llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index e90a3a275f67c..700dc87d2f821 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -818,39 +818,6 @@ static BasicBlock::iterator skipToNonAllocaInsertPt(BasicBlock &BB,
return I;
}
-/// Get the underlying type of a homogeneous aggregate type, or nullptr if the
-/// type is non-homogeneous.
-static Type *getHomogeneousType(Type *Ty) {
- Type *ElemTy = nullptr;
- SmallVector<Type *> WorkList;
- WorkList.push_back(Ty);
- while (!WorkList.empty()) {
- Type *CurTy = WorkList.pop_back_val();
-
- // Check if the current type is an aggregate type.
- if (auto *VectorTy = dyn_cast<FixedVectorType>(CurTy)) {
- WorkList.push_back(VectorTy->getElementType());
- continue;
- }
- if (auto *ArrayTy = dyn_cast<ArrayType>(CurTy)) {
- WorkList.push_back(ArrayTy->getElementType());
- continue;
- }
- if (auto *StructTy = dyn_cast<StructType>(CurTy)) {
- WorkList.append(StructTy->element_begin(), StructTy->element_end());
- continue;
- }
-
- // If not, it must be the same as all other non-aggregate types.
- if (!ElemTy)
- ElemTy = CurTy;
- else if (ElemTy != CurTy)
- return nullptr;
- }
-
- return ElemTy;
-}
-
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
@@ -861,42 +828,42 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
}
Type *AllocaTy = Alloca.getAllocatedType();
- Type *ElemTy = getHomogeneousType(AllocaTy);
-
- if (!ElemTy || !VectorType::isValidElementType(ElemTy)) {
- LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
- return false;
- }
+ auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
+ if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
+ uint64_t NumElems = 1;
+ Type *ElemTy;
+ do {
+ NumElems *= ArrayTy->getNumElements();
+ ElemTy = ArrayTy->getElementType();
+ } while ((ArrayTy = dyn_cast<ArrayType>(ElemTy)));
+
+ // Check for array of vectors
+ auto *InnerVectorTy = dyn_cast<FixedVectorType>(ElemTy);
+ if (InnerVectorTy) {
+ NumElems *= InnerVectorTy->getNumElements();
+ ElemTy = InnerVectorTy->getElementType();
+ }
- unsigned ElementSizeInBits = DL->getTypeSizeInBits(ElemTy);
- if (ElementSizeInBits != DL->getTypeAllocSizeInBits(ElemTy)) {
- LLVM_DEBUG(dbgs() << " Cannot convert to vector if the allocation size "
- "does not match the type's size\n");
- return false;
- }
- unsigned ElementSize = ElementSizeInBits / 8;
- if (ElementSize == 0) {
- LLVM_DEBUG(dbgs() << " Cannot create vector of zero-sized elements\n");
- return false;
+ if (VectorType::isValidElementType(ElemTy) && NumElems > 0) {
+ unsigned ElementSize = DL->getTypeSizeInBits(ElemTy) / 8;
+ if (ElementSize > 0) {
+ unsigned AllocaSize = DL->getTypeStoreSize(AllocaTy);
+ // Expand vector if required to match padding of inner type,
+ // i.e. odd size subvectors.
+ // Storage size of new vector must match that of alloca for correct
+ // behaviour of byte offsets and GEP computation.
+ if (NumElems * ElementSize != AllocaSize)
+ NumElems = AllocaSize / ElementSize;
+ if (NumElems > 0 && (AllocaSize % ElementSize) == 0)
+ VectorTy = FixedVectorType::get(ElemTy, NumElems);
+ }
+ }
}
- // Calculate the size of the corresponding vector, accounting for padding of
- // inner types, e.g., odd-sized subvectors. Storage size of new vector must
- // match that of alloca for correct behaviour of byte offsets and GEP
- // computation.
- unsigned AllocaSize = DL->getTypeStoreSize(AllocaTy);
- unsigned NumElems = AllocaSize / ElementSize;
- if (NumElems == 0) {
- LLVM_DEBUG(dbgs() << " Cannot vectorize an empty aggregate type\n");
- return false;
- }
- if (NumElems * ElementSize != AllocaSize) {
- LLVM_DEBUG(
- dbgs() << " Cannot convert type into vector of the same size\n");
+ if (!VectorTy) {
+ LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
return false;
}
- auto *VectorTy = FixedVectorType::get(ElemTy, NumElems);
- assert(VectorTy && "Failed to create vector type.");
const unsigned MaxElements =
(MaxVectorRegs * 32) / DL->getTypeSizeInBits(VectorTy->getElementType());
@@ -928,6 +895,15 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
LLVM_DEBUG(dbgs() << " Attempting promotion to: " << *VectorTy << "\n");
+ Type *VecEltTy = VectorTy->getElementType();
+ unsigned ElementSizeInBits = DL->getTypeSizeInBits(VecEltTy);
+ if (ElementSizeInBits != DL->getTypeAllocSizeInBits(VecEltTy)) {
+ LLVM_DEBUG(dbgs() << " Cannot convert to vector if the allocation size "
+ "does not match the type's size\n");
+ return false;
+ }
+ unsigned ElementSize = ElementSizeInBits / 8;
+ assert(ElementSize > 0);
for (auto *U : Uses) {
Instruction *Inst = cast<Instruction>(U->getUser());
@@ -967,7 +943,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
// If we can't compute a vector index from this GEP, then we can't
// promote this alloca to vector.
- Value *Index = GEPToVectorIndex(GEP, &Alloca, ElemTy, *DL, NewGEPInsts);
+ Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL, NewGEPInsts);
if (!Index)
return RejectUser(Inst, "cannot compute vector index for GEP");
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll
deleted file mode 100644
index 1cdd027fef89d..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll
+++ /dev/null
@@ -1,286 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca-to-vector -amdgpu-promote-alloca-to-vector-limit=512 %s | FileCheck %s
-
-define i8 @test_v4i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_v4i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca <4 x i8>, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_a4i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_a4i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca [4 x i8], align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_a2v4i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_a2v4i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca [2 x <4 x i8>], align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_a2v3i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_a2v3i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca [2 x <3 x i8>], align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_a2a4i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_a2a4i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca [2 x [4 x i8]], align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_a2a3i8(i48 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_a2a3i8(
-; CHECK-SAME: i48 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <6 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i48 [[BITS]] to <6 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <6 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca [2 x [3 x i8]], align 4, addrspace(5)
- store i48 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s1v4i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s1v4i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {<4 x i8>}, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s1a4i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s1a4i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {[4 x i8]}, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s4i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s4i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {i8, i8, i8, i8}, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s2v4i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s2v4i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {<4 x i8>, <4 x i8>}, align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s2v2i8v4i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s2v2i8v4i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {<2 x i8>, <4 x i8>}, align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s2v2i8v3i8(i64 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s2v2i8v3i8(
-; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <8 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {<2 x i8>, <3 x i8>}, align 4, addrspace(5)
- store i64 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s2s2i8s4i8(i48 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s2s2i8s4i8(
-; CHECK-SAME: i48 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <6 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i48 [[BITS]] to <6 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <6 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {{i8, i8}, {i8, i8, i8, i8}}, align 4, addrspace(5)
- store i48 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s2s2i8s3i8(i40 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s2s2i8s3i8(
-; CHECK-SAME: i40 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <5 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i40 [[BITS]] to <5 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <5 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {{i8, i8}, {i8, i8, i8}}, align 4, addrspace(5)
- store i40 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s3i8s1i8v2i8(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s3i8s1i8v2i8(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {i8, {i8}, <2 x i8>}, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-define i8 @test_s3i8i8s0(i16 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_s3i8i8s0(
-; CHECK-SAME: i16 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = freeze <2 x i8> poison
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[BITS]] to <2 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i64 [[IDX]]
-; CHECK-NEXT: ret i8 [[TMP2]]
-;
- %stack = alloca {i8, i8, {}}, align 4, addrspace(5)
- store i16 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-; heterogeneous element types are not supported
-define i8 @test_heterogeneous(i32 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_heterogeneous(
-; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = alloca { i8, i8, i16 }, align 4, addrspace(5)
-; CHECK-NEXT: store i32 [[BITS]], ptr addrspace(5) [[STACK]], align 4
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i8, ptr addrspace(5) [[STACK]], i64 [[IDX]]
-; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr addrspace(5) [[PTR]], align 1
-; CHECK-NEXT: ret i8 [[VAL]]
-;
- %stack = alloca {i8, i8, i16}, align 4, addrspace(5)
- store i32 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}
-
-; empty types are not supported
-define void @test_empty() {
-; CHECK-LABEL: define void @test_empty() {
-; CHECK-NEXT: [[STACK:%.*]] = alloca {}, align 4, addrspace(5)
-; CHECK-NEXT: ret void
-;
- %stack = alloca {}, align 4, addrspace(5)
- ret void
-}
-
-; singleton types are not supported
-define i8 @test_singleton(i8 %bits, i64 %idx) {
-; CHECK-LABEL: define i8 @test_singleton(
-; CHECK-SAME: i8 [[BITS:%.*]], i64 [[IDX:%.*]]) {
-; CHECK-NEXT: [[STACK:%.*]] = alloca { i8, {} }, align 4, addrspace(5)
-; CHECK-NEXT: store i8 [[BITS]], ptr addrspace(5) [[STACK]], align 1
-; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i8, ptr addrspace(5) [[STACK]], i64 [[IDX]]
-; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr addrspace(5) [[PTR]], align 1
-; CHECK-NEXT: ret i8 [[VAL]]
-;
- %stack = alloca {i8, {}}, align 4, addrspace(5)
- store i8 %bits, ptr addrspace(5) %stack
- %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
- %val = load i8, ptr addrspace(5) %ptr, align 1
- ret i8 %val
-}