[llvm] [DirectX] Fix bug where Flatten arrays was only using last index (PR #144146)
Farzon Lotfi via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 16 08:35:40 PDT 2025
https://github.com/farzonl updated https://github.com/llvm/llvm-project/pull/144146
From d3ff0991b70fc715faa34c3d09c1f056013c11c4 Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzonlotfi at microsoft.com>
Date: Fri, 13 Jun 2025 12:59:19 -0400
Subject: [PATCH 1/3] [DirectX] Fix bug where Flatten arrays was only using
last index
fixes #142836
We added a function called `collectIndicesAndDimsFromGEP` which builds up
the Indices and Dims for both the recursive case and the base case.
Strictly speaking, fixing #142836 did not require adding it to the
recursive case. The recursive case exists for GEP chains, which usually
carry two indices per GEP, i.e. the pointer index and one array index.
Adding collectIndicesAndDimsFromGEP to the recursive case means we can now
handle mixed-mode indexing: if a GEP in the chain has three indices
instead of the usual two, the last two indices are now treated as part of
the computation of the flat array index.
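
For illustration, here is a minimal standalone sketch (not the pass's
actual code; the names are hypothetical) of the row-major flattening that
the collected Indices and Dims feed into, checked against the expected
flat indices in the new flatten-array.ll tests:

#include <cassert>
#include <cstdint>
#include <vector>

// Row-major (Horner-style) flattening: one dimension per index.
static uint64_t flattenIndex(const std::vector<uint64_t> &Indices,
                             const std::vector<uint64_t> &Dims) {
  assert(Indices.size() == Dims.size() && "one dimension per index");
  uint64_t Flat = 0;
  for (size_t I = 0; I < Indices.size(); ++I) {
    assert(Indices[I] < Dims[I] && "index must be in bounds");
    Flat = Flat * Dims[I] + Indices[I];
  }
  return Flat;
}

int main() {
  // [2 x [2 x float]] indexed as (1, 1) -> flat index 3 (two_index_gep_const).
  assert(flattenIndex({1, 1}, {2, 2}) == 3);
  // [2 x [2 x [2 x [2 x i32]]]] indexed as (1, 1, 1, 1) -> 15 (gep_4d_index_test).
  assert(flattenIndex({1, 1, 1, 1}, {2, 2, 2, 2}) == 15);
  return 0;
}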
---
llvm/lib/Target/DirectX/DXILFlattenArrays.cpp | 47 ++++++++++---
llvm/test/CodeGen/DirectX/flatten-array.ll | 70 +++++++++++++++++++
.../DirectX/llc-vector-load-scalarize.ll | 8 +--
3 files changed, 112 insertions(+), 13 deletions(-)
diff --git a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
index b1f3f41a28e8b..620b34cbaf7e6 100644
--- a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
+++ b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
@@ -86,6 +86,13 @@ class DXILFlattenArraysVisitor
Value *genInstructionFlattenIndices(ArrayRef<Value *> Indices,
ArrayRef<uint64_t> Dims,
IRBuilder<> &Builder);
+
+ // Helper function to collect indices and dimensions from a GEP instruction
+ void collectIndicesAndDimsFromGEP(GetElementPtrInst &GEP,
+ SmallVectorImpl<Value *> &Indices,
+ SmallVectorImpl<uint64_t> &Dims,
+ bool &AllIndicesAreConstInt);
+
void
recursivelyCollectGEPs(GetElementPtrInst &CurrGEP,
ArrayType *FlattenedArrayType, Value *PtrOperand,
@@ -218,6 +225,29 @@ bool DXILFlattenArraysVisitor::visitAllocaInst(AllocaInst &AI) {
return true;
}
+void DXILFlattenArraysVisitor::collectIndicesAndDimsFromGEP(
+ GetElementPtrInst &GEP, SmallVectorImpl<Value *> &Indices,
+ SmallVectorImpl<uint64_t> &Dims, bool &AllIndicesAreConstInt) {
+
+ // Skip the first index (which is ptr index ie always start at 0 for arrays)
+ // and collect all subsequent indices
+ Type *CurrentType = GEP.getSourceElementType();
+ for (unsigned I = 1; I < GEP.getNumIndices(); ++I) {
+ Value *Index = GEP.getOperand(I + 1); // +1 because operand 0 is the pointer
+ AllIndicesAreConstInt &= isa<ConstantInt>(Index);
+ Indices.push_back(Index);
+
+ // Get the dimension size for this index
+ if (auto *ArrayTy = dyn_cast<ArrayType>(CurrentType)) {
+ Dims.push_back(ArrayTy->getNumElements());
+ CurrentType = ArrayTy->getElementType();
+ } else {
+ // This shouldn't happen for well-formed GEPs
+ assert(false && "Expected array type in GEP chain");
+ }
+ }
+}
+
void DXILFlattenArraysVisitor::recursivelyCollectGEPs(
GetElementPtrInst &CurrGEP, ArrayType *FlattenedArrayType,
Value *PtrOperand, unsigned &GEPChainUseCount, SmallVector<Value *> Indices,
@@ -226,12 +256,8 @@ void DXILFlattenArraysVisitor::recursivelyCollectGEPs(
if (GEPChainMap.count(&CurrGEP) > 0)
return;
- Value *LastIndex = CurrGEP.getOperand(CurrGEP.getNumOperands() - 1);
- AllIndicesAreConstInt &= isa<ConstantInt>(LastIndex);
- Indices.push_back(LastIndex);
- assert(isa<ArrayType>(CurrGEP.getSourceElementType()));
- Dims.push_back(
- cast<ArrayType>(CurrGEP.getSourceElementType())->getNumElements());
+ // Collect indices and dimensions from the current GEP
+ collectIndicesAndDimsFromGEP(CurrGEP, Indices, Dims, AllIndicesAreConstInt);
bool IsMultiDimArr = isMultiDimensionalArray(CurrGEP.getSourceElementType());
if (!IsMultiDimArr) {
assert(GEPChainUseCount < FlattenedArrayType->getNumElements());
@@ -316,9 +342,12 @@ bool DXILFlattenArraysVisitor::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Handle zero uses here because there won't be an update via
// a child in the chain later.
if (GEPChainUseCount == 0) {
- SmallVector<Value *> Indices({GEP.getOperand(GEP.getNumOperands() - 1)});
- SmallVector<uint64_t> Dims({ArrType->getNumElements()});
- bool AllIndicesAreConstInt = isa<ConstantInt>(Indices[0]);
+ SmallVector<Value *> Indices;
+ SmallVector<uint64_t> Dims;
+ bool AllIndicesAreConstInt = true;
+
+ // Collect indices and dimensions from the GEP
+ collectIndicesAndDimsFromGEP(GEP, Indices, Dims, AllIndicesAreConstInt);
GEPData GEPInfo{std::move(FlattenedArrayType), PtrOperand,
std::move(Indices), std::move(Dims), AllIndicesAreConstInt};
return visitGetElementPtrInstInGEPChainBase(GEPInfo, GEP);
diff --git a/llvm/test/CodeGen/DirectX/flatten-array.ll b/llvm/test/CodeGen/DirectX/flatten-array.ll
index 5c761014d471f..a8ed24a71d5e0 100644
--- a/llvm/test/CodeGen/DirectX/flatten-array.ll
+++ b/llvm/test/CodeGen/DirectX/flatten-array.ll
@@ -187,5 +187,75 @@ define void @global_gep_store() {
ret void
}
+ at g = local_unnamed_addr addrspace(3) global [2 x [2 x float]] zeroinitializer, align 4
+define void @two_index_gep() {
+ ; CHECK-LABEL: define void @two_index_gep(
+ ; CHECK: [[THREAD_ID:%.*]] = tail call i32 @llvm.dx.thread.id(i32 0)
+ ; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[THREAD_ID]], 2
+ ; CHECK-NEXT: [[ADD:%.*]] = add i32 1, [[MUL]]
+ ; CHECK-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds nuw [4 x float], ptr addrspace(3) @g.1dim, i32 0, i32 [[ADD]]
+ ; CHECK-NEXT: load float, ptr addrspace(3) [[GEP_PTR]], align 4
+ ; CHECK-NEXT: ret void
+ %1 = tail call i32 @llvm.dx.thread.id(i32 0)
+ %2 = getelementptr inbounds nuw [2 x [2 x float]], ptr addrspace(3) @g, i32 0, i32 %1, i32 1
+ %3 = load float, ptr addrspace(3) %2, align 4
+ ret void
+}
+
+define void @two_index_gep_const() {
+ ; CHECK-LABEL: define void @two_index_gep_const(
+ ; CHECK-NEXT: [[GEP_PTR:%.*]] = getelementptr inbounds nuw [4 x float], ptr addrspace(3) @g.1dim, i32 0, i32 3
+ ; CHECK-NEXT: load float, ptr addrspace(3) [[GEP_PTR]], align 4
+ ; CHECK-NEXT: ret void
+ %1 = getelementptr inbounds nuw [2 x [2 x float]], ptr addrspace(3) @g, i32 0, i32 1, i32 1
+ %3 = load float, ptr addrspace(3) %1, align 4
+ ret void
+}
+
+define void @gep_4d_index_test() {
+ ; CHECK-LABEL: gep_4d_index_test
+ ; CHECK: [[a:%.*]] = alloca [16 x i32], align 4
+ ; CHECK-NEXT: getelementptr inbounds [16 x i32], ptr %.1dim, i32 0, i32 1
+ ; CHECK-NEXT: getelementptr inbounds [16 x i32], ptr %.1dim, i32 0, i32 3
+ ; CHECK-NEXT: getelementptr inbounds [16 x i32], ptr %.1dim, i32 0, i32 7
+ ; CHECK-NEXT: getelementptr inbounds [16 x i32], ptr %.1dim, i32 0, i32 15
+ ; CHECK-NEXT: ret void
+ %1 = alloca [2x[2 x[2 x [2 x i32]]]], align 4
+ %2 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 0, i32 1
+ %3 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 1, i32 1
+ %4 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1, i32 1, i32 1
+ %5 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1, i32 1, i32 1
+ ret void
+}
+
+define void @gep_4d_index_and_gep_chain_mixed() {
+ ; CHECK-LABEL: gep_4d_index_and_gep_chain_mixed
+ ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [16 x i32], align 4
+ ; CHECK-COUNT-16: getelementptr inbounds [16 x i32], ptr [[ALLOCA]], i32 0, i32 {{[0-9]|1[0-5]}}
+ ; CHECK-NEXT: ret void
+ %1 = alloca [2x[2 x[2 x [2 x i32]]]], align 4
+ %a4d0_0 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0
+ %a2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 0, i32 0
+ %a2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 0, i32 1
+ %a2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 1, i32 0
+ %a2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 1, i32 1
+ %b4d0_1 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1
+ %b2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 0, i32 0
+ %b2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 0, i32 1
+ %b2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 1, i32 0
+ %b2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 1, i32 1
+ %c4d1_0 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 0
+ %c2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 0, i32 0
+ %c2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 0, i32 1
+ %c2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 1, i32 0
+ %c2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 1, i32 1
+ %g4d1_1 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1
+ %g2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 0, i32 0
+ %g2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 0, i32 1
+ %g2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 1, i32 0
+ %g2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 1, i32 1
+ ret void
+}
+
; Make sure we don't try to walk the body of a function declaration.
declare void @opaque_function()
diff --git a/llvm/test/CodeGen/DirectX/llc-vector-load-scalarize.ll b/llvm/test/CodeGen/DirectX/llc-vector-load-scalarize.ll
index c960aad3d2627..778113bd3160f 100644
--- a/llvm/test/CodeGen/DirectX/llc-vector-load-scalarize.ll
+++ b/llvm/test/CodeGen/DirectX/llc-vector-load-scalarize.ll
@@ -111,13 +111,13 @@ define <4 x i32> @multid_load_test() #0 {
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr addrspace(3) [[TMP5]], align 4
; CHECK-NEXT: [[TMP7:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 3) to ptr addrspace(3)
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr addrspace(3) [[TMP7]], align 4
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 1) to ptr addrspace(3)
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 4) to ptr addrspace(3)
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr addrspace(3) [[TMP9]], align 4
-; CHECK-NEXT: [[TMP11:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 1), i32 1) to ptr addrspace(3)
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 4), i32 1) to ptr addrspace(3)
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr addrspace(3) [[TMP11]], align 4
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 1), i32 2) to ptr addrspace(3)
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 4), i32 2) to ptr addrspace(3)
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr addrspace(3) [[TMP13]], align 4
-; CHECK-NEXT: [[TMP15:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 1), i32 3) to ptr addrspace(3)
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast ptr addrspace(3) getelementptr (i32, ptr addrspace(3) getelementptr inbounds ([36 x i32], ptr addrspace(3) @groushared2dArrayofVectors.scalarized.1dim, i32 0, i32 4), i32 3) to ptr addrspace(3)
; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr addrspace(3) [[TMP15]], align 4
; CHECK-NEXT: [[DOTI05:%.*]] = add i32 [[TMP2]], [[TMP10]]
; CHECK-NEXT: [[DOTI16:%.*]] = add i32 [[TMP4]], [[TMP12]]
From b1c6dab3b16cc202c9a5a1f654b23bae77b5e899 Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzonlotfi at microsoft.com>
Date: Fri, 13 Jun 2025 16:58:59 -0400
Subject: [PATCH 2/3] address PR comment by clarifying that index 0 is the ptr
 operand of the array
---
llvm/lib/Target/DirectX/DXILFlattenArrays.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
index 620b34cbaf7e6..5cefd3e9152c5 100644
--- a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
+++ b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
@@ -229,7 +229,7 @@ void DXILFlattenArraysVisitor::collectIndicesAndDimsFromGEP(
GetElementPtrInst &GEP, SmallVectorImpl<Value *> &Indices,
SmallVectorImpl<uint64_t> &Dims, bool &AllIndicesAreConstInt) {
- // Skip the first index (which is ptr index ie always start at 0 for arrays)
+ // Skip the first index which is array ptr
// and collect all subsequent indices
Type *CurrentType = GEP.getSourceElementType();
for (unsigned I = 1; I < GEP.getNumIndices(); ++I) {
From 97d94513798e82a687079342542100687dd95e99 Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzonlotfi at microsoft.com>
Date: Mon, 16 Jun 2025 11:35:18 -0400
Subject: [PATCH 3/3] address PR comments: make the loop in
 collectIndicesAndDimsFromGEP clearer by not using getOperand
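
For context, a hedged sketch of the two equivalent iteration styles this
commit is about (assumes LLVM headers and a GetElementPtrInst &GEP; not
the pass's exact code). Operand 0 of a GEP is the pointer, so the index
operands start at operand 1; GEP.indices() already excludes the pointer,
and drop_begin additionally skips the leading pointer-offset index:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Old style: manual operand arithmetic over the raw operand list.
static void collectOperandStyle(GetElementPtrInst &GEP,
                                SmallVectorImpl<Value *> &Out) {
  for (unsigned I = 1; I < GEP.getNumIndices(); ++I)
    Out.push_back(GEP.getOperand(I + 1)); // +1 because operand 0 is the pointer
}

// New style: range-based iteration over just the index operands.
static void collectRangeStyle(GetElementPtrInst &GEP,
                              SmallVectorImpl<Value *> &Out) {
  for (Value *Index : drop_begin(GEP.indices(), 1))
    Out.push_back(Index);
}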
---
llvm/lib/Target/DirectX/DXILFlattenArrays.cpp | 11 ++++-------
llvm/test/CodeGen/DirectX/flatten-array.ll | 16 ++++++++--------
2 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
index 5cefd3e9152c5..0b7cf2f970172 100644
--- a/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
+++ b/llvm/lib/Target/DirectX/DXILFlattenArrays.cpp
@@ -229,20 +229,17 @@ void DXILFlattenArraysVisitor::collectIndicesAndDimsFromGEP(
GetElementPtrInst &GEP, SmallVectorImpl<Value *> &Indices,
SmallVectorImpl<uint64_t> &Dims, bool &AllIndicesAreConstInt) {
- // Skip the first index which is array ptr
- // and collect all subsequent indices
Type *CurrentType = GEP.getSourceElementType();
- for (unsigned I = 1; I < GEP.getNumIndices(); ++I) {
- Value *Index = GEP.getOperand(I + 1); // +1 because operand 0 is the pointer
- AllIndicesAreConstInt &= isa<ConstantInt>(Index);
+
+ // Note index 0 is the ptr index.
+ for (Value *Index : llvm::drop_begin(GEP.indices(), 1)) {
Indices.push_back(Index);
+ AllIndicesAreConstInt &= isa<ConstantInt>(Index);
- // Get the dimension size for this index
if (auto *ArrayTy = dyn_cast<ArrayType>(CurrentType)) {
Dims.push_back(ArrayTy->getNumElements());
CurrentType = ArrayTy->getElementType();
} else {
- // This shouldn't happen for well-formed GEPs
assert(false && "Expected array type in GEP chain");
}
}
diff --git a/llvm/test/CodeGen/DirectX/flatten-array.ll b/llvm/test/CodeGen/DirectX/flatten-array.ll
index a8ed24a71d5e0..dc8c5f8421bfe 100644
--- a/llvm/test/CodeGen/DirectX/flatten-array.ll
+++ b/llvm/test/CodeGen/DirectX/flatten-array.ll
@@ -221,10 +221,10 @@ define void @gep_4d_index_test() {
; CHECK-NEXT: getelementptr inbounds [16 x i32], ptr %.1dim, i32 0, i32 15
; CHECK-NEXT: ret void
%1 = alloca [2x[2 x[2 x [2 x i32]]]], align 4
- %2 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 0, i32 1
- %3 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 1, i32 1
- %4 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1, i32 1, i32 1
- %5 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1, i32 1, i32 1
+ %2 = getelementptr inbounds [2 x [2 x[2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 0, i32 1
+ %3 = getelementptr inbounds [2 x [2 x[2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0, i32 1, i32 1
+ %4 = getelementptr inbounds [2 x [2 x[2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1, i32 1, i32 1
+ %5 = getelementptr inbounds [2 x [2 x[2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1, i32 1, i32 1
ret void
}
@@ -234,22 +234,22 @@ define void @gep_4d_index_and_gep_chain_mixed() {
; CHECK-COUNT-16: getelementptr inbounds [16 x i32], ptr [[ALLOCA]], i32 0, i32 {{[0-9]|1[0-5]}}
; CHECK-NEXT: ret void
%1 = alloca [2x[2 x[2 x [2 x i32]]]], align 4
- %a4d0_0 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0
+ %a4d0_0 = getelementptr inbounds [2 x [2 x [2 x [2 x i32]]]], [2 x [2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 0
%a2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 0, i32 0
%a2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 0, i32 1
%a2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 1, i32 0
%a2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %a4d0_0, i32 0, i32 1, i32 1
- %b4d0_1 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1
+ %b4d0_1 = getelementptr inbounds [2 x [2 x [2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 0, i32 1
%b2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 0, i32 0
%b2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 0, i32 1
%b2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 1, i32 0
%b2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %b4d0_1, i32 0, i32 1, i32 1
- %c4d1_0 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 0
+ %c4d1_0 = getelementptr inbounds [2 x [2 x [2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 0
%c2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 0, i32 0
%c2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 0, i32 1
%c2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 1, i32 0
%c2d1_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %c4d1_0, i32 0, i32 1, i32 1
- %g4d1_1 = getelementptr inbounds [2x[2 x[2 x [2 x i32]]]], [2x[2 x[2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1
+ %g4d1_1 = getelementptr inbounds [2 x [2 x [2 x [2 x i32]]]], [2 x [2 x [2 x [2 x i32]]]]* %1, i32 0, i32 1, i32 1
%g2d0_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 0, i32 0
%g2d0_1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 0, i32 1
%g2d1_0 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %g4d1_1, i32 0, i32 1, i32 0