[llvm] [InstCombine] Lower multi-dimensional GEP to ptradd (PR #150383)
Usha Gupta via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 1 08:46:48 PDT 2025
https://github.com/usha1830 updated https://github.com/llvm/llvm-project/pull/150383
From fbd143dc0ba7a90821de634a5787027f59a9babc Mon Sep 17 00:00:00 2001
From: Usha Gupta <usha.gupta at arm.com>
Date: Thu, 24 Jul 2025 07:26:14 +0000
Subject: [PATCH 1/3] [InstCombine] Lower multi-dimensional GEP to ptradd
---
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index e2a9255ca9c6e..9b148e523b7a7 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2995,6 +2995,15 @@ static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
m_Shl(m_Value(), m_ConstantInt())))))
return true;
+ // Flatten multidimensional GEPs with one variable index.
+ unsigned NumVarIndices = 0;
+ for (unsigned i = 1; i < GEP.getNumOperands(); ++i) {
+ if (!isa<ConstantInt>(GEP.getOperand(i)))
+ ++NumVarIndices;
+ }
+ if (NumVarIndices == 1)
+ return true;
+
// gep (gep %p, C1), %x, C2 is expanded so the two constants can
// possibly be merged together.
auto PtrOpGep = dyn_cast<GEPOperator>(PtrOp);
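For reference, the rewrite this predicate opts into splits a multi-dimensional array GEP with a single variable index into an explicit scale plus i8 GEPs (the ptradd form). A sketch based on the @glob test added in the next patch, where the variable index has a stride of 10*10*4 = 400 bytes and the constant indices contribute 2*40 + 5*4 = 100 bytes (value names are illustrative):

  ; before
  %gep = getelementptr [10 x [10 x [10 x i32]]], ptr @glob, i64 0, i64 %x, i64 2, i64 5

  ; after instcombine
  %gep.idx = mul i64 %x, 400
  %tmp = getelementptr i8, ptr @glob, i64 %gep.idx
  %gep = getelementptr i8, ptr %tmp, i64 100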
From 4ada34b3447375e66918518e6f24820680d40f07 Mon Sep 17 00:00:00 2001
From: Usha Gupta <usha.gupta at arm.com>
Date: Fri, 25 Jul 2025 19:05:05 +0000
Subject: [PATCH 2/3] Add more constraints for handling multi-dimensional GEPs
for global arrays
---
.../InstCombine/InstructionCombining.cpp | 45 +++++++++++---
.../InstCombine/canonicalize-gep-constglob.ll | 61 +++++++++++++++++++
2 files changed, 99 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 9b148e523b7a7..787de6a824abd 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2975,6 +2975,43 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
return nullptr;
}
+/// Return true if we should lower multi-dimensional GEPs.
+static bool isMultiDimGep(GetElementPtrInst &GEP) {
+  // Limit handling to arrays with 3+ dimensions and integer elements, e.g.:
+ // getelementptr [9 x [9 x [9 x i32]]], ptr @arr, i64 0, i64 %i, i64 2, i64 3
+ unsigned NumOps = GEP.getNumOperands();
+
+ // First index must be constant zero (array base)
+ if (!isa<ConstantInt>(GEP.getOperand(1)) ||
+ !cast<ConstantInt>(GEP.getOperand(1))->isZero())
+ return false;
+
+ // Limit lowering for arrays with 3 or more dimensions
+ if (NumOps < 5)
+ return false;
+
+ // Check that it's arrays all the way
+ Type *CurTy = GEP.getSourceElementType();
+ unsigned NumVar = 0;
+ for (unsigned I = 2; I < NumOps; ++I) {
+ auto *ArrTy = dyn_cast<ArrayType>(CurTy);
+ if (!ArrTy)
+ return false;
+ if (!isa<ConstantInt>(GEP.getOperand(I)))
+ ++NumVar;
+ CurTy = ArrTy->getElementType();
+ }
+
+  // Only lower GEPs with exactly one variable index.
+ if (NumVar != 1)
+ return false;
+
+ if (!CurTy->isIntegerTy() || CurTy->getIntegerBitWidth() > 128)
+ return false;
+
+ return true;
+}
+
/// Return true if we should canonicalize the gep to an i8 ptradd.
static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
Value *PtrOp = GEP.getOperand(0);
@@ -2995,13 +3032,7 @@ static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
m_Shl(m_Value(), m_ConstantInt())))))
return true;
- // Flatten multidimensional GEPs with one variable index.
- unsigned NumVarIndices = 0;
- for (unsigned i = 1; i < GEP.getNumOperands(); ++i) {
- if (!isa<ConstantInt>(GEP.getOperand(i)))
- ++NumVarIndices;
- }
- if (NumVarIndices == 1)
+  if (isMultiDimGep(GEP))
return true;
// gep (gep %p, C1), %x, C2 is expanded so the two constants can
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
index 1520d6ce59548..76686041c93b0 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-gep-constglob.ll
@@ -2,6 +2,9 @@
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
@glob = internal global [10 x [10 x [10 x i32]]] zeroinitializer
+@glob_i8 = internal global [10 x [10 x [10 x i8]]] zeroinitializer
+@glob_i16 = internal global [10 x [10 x [10 x i16]]] zeroinitializer
+@glob_i64 = internal global [10 x [10 x [10 x i64]]] zeroinitializer
define ptr @x12(i64 %x) {
; CHECK-LABEL: define ptr @x12(
@@ -76,3 +79,61 @@ entry:
%c = add i32 %a, %b
ret i32 %c
}
+
+define ptr @flat_gep8(i64 %x) {
+; CHECK-LABEL: define ptr @flat_gep8(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_IDX:%.*]] = mul i64 [[X]], 100
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr @glob_i8, i64 [[GEP_IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 35
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+entry:
+ %gep = getelementptr [10 x [10 x [10 x i8]]], ptr @glob_i8, i64 0, i64 %x, i64 3, i64 5
+ ret ptr %gep
+}
+
+define ptr @flat_gep16(i64 %x) {
+; CHECK-LABEL: define ptr @flat_gep16(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_IDX:%.*]] = mul i64 [[X]], 200
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr @glob_i16, i64 [[GEP_IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 46
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+entry:
+ %gep = getelementptr [10 x [10 x [10 x i16]]], ptr @glob_i16, i64 0, i64 %x, i64 2, i64 3
+ ret ptr %gep
+}
+
+define ptr @flat_gep(i64 %x) {
+; CHECK-LABEL: define ptr @flat_gep(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_IDX:%.*]] = mul i64 [[X]], 400
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr @glob, i64 [[GEP_IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 100
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+entry:
+ %gep = getelementptr [10 x [10 x [10 x i32]]], ptr @glob, i64 0, i64 %x, i64 2, i64 5
+ ret ptr %gep
+}
+
+define ptr @flat_gep64(i64 %x) {
+; CHECK-LABEL: define ptr @flat_gep64(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_IDX:%.*]] = mul i64 [[X]], 800
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr @glob_i64, i64 [[GEP_IDX]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[TMP0]], i64 288
+; CHECK-NEXT: ret ptr [[GEP]]
+;
+entry:
+ %gep = getelementptr [10 x [10 x [10 x i64]]], ptr @glob_i64, i64 0, i64 %x, i64 3, i64 6
+ ret ptr %gep
+}
+
+
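For contrast, a GEP outside these constraints is not flattened by this rule. A minimal sketch, assuming a floating-point global; @glob_f32 and @no_flatten_float are made up for illustration and are not part of the patch's tests. The element type fails the isIntegerTy() check, so isMultiDimGep returns false and no i8-GEP canonicalization is requested for this GEP:

  @glob_f32 = internal global [10 x [10 x [10 x float]]] zeroinitializer

  define ptr @no_flatten_float(i64 %x) {
  entry:
    ; The element type is float, not an integer type, so the multi-dimensional
    ; flattening heuristic does not fire here.
    %gep = getelementptr [10 x [10 x [10 x float]]], ptr @glob_f32, i64 0, i64 %x, i64 2, i64 5
    ret ptr %gep
  }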
From 4d534db1a980ab63c8f00835058df47011f7e226 Mon Sep 17 00:00:00 2001
From: Usha Gupta <usha.gupta at arm.com>
Date: Fri, 1 Aug 2025 15:41:24 +0000
Subject: [PATCH 3/3] Handle nested GEPs with one variable index in the outer
 GEP
---
.../InstCombine/InstructionCombining.cpp | 57 +++++++++++++++----
1 file changed, 46 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 787de6a824abd..4a285305a9b8f 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2975,29 +2975,64 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
return nullptr;
}
-/// Return true if we should lower multi-dimensional GEPs.
-static bool isMultiDimGep(GetElementPtrInst &GEP) {
-  // Limit handling to arrays with 3+ dimensions and integer elements, e.g.:
- // getelementptr [9 x [9 x [9 x i32]]], ptr @arr, i64 0, i64 %i, i64 2, i64 3
- unsigned NumOps = GEP.getNumOperands();
+/// Accumulate constant indices from a chain of GEPs with all-constant
+/// indices, then check whether the outermost GEP (the one with a single
+/// variable index) is flattenable. Returns true for multi-dimensional array
+/// GEPs with exactly one variable index; the pointer operand may itself be
+/// another GEP with all-constant indices. For example:
+///   - getelementptr [9 x [9 x [9 x i32]]], ptr @arr, i64 0, i64 %i, i64 2, i64 3
+///   - getelementptr [9 x [9 x [9 x i32]]],
+///       <another gep>, i64 0, i64 %i, i64 2, i64 3
+static bool isMultiDimGepFlattenable(const GetElementPtrInst &GEP) {
+  // Collect the chain of all-constant-index GEPs feeding the pointer operand.
+  SmallVector<const GEPOperator *, 4> GEPChain;
+  const Value *Base = GEP.getPointerOperand()->stripPointerCasts();
+
+  // Walk inwards over GEPs whose indices are all constant.
+ while (auto *CurGep = dyn_cast<GEPOperator>(Base)) {
+ bool AllConst = true;
+ for (unsigned I = 1; I < CurGep->getNumOperands(); ++I)
+ if (!isa<ConstantInt>(CurGep->getOperand(I)))
+ AllConst = false;
+ if (!AllConst)
+ break;
+ GEPChain.push_back(CurGep);
+ Base = CurGep->getOperand(0)->stripPointerCasts();
+ }
+
+ // Accumulate all indices from innermost to outermost
+ SmallVector<Value *, 8> Indices;
+ for (int I = GEPChain.size() - 1; I >= 0; --I) {
+ const GEPOperator *GO = GEPChain[I];
+ for (unsigned J = 1; J < GO->getNumOperands(); ++J)
+ Indices.push_back(GO->getOperand(J));
+ }
+
+ // Add indices from the main GEP (skip pointer operand)
+ for (unsigned J = 1; J < GEP.getNumOperands(); ++J)
+ Indices.push_back(GEP.getOperand(J));
+
+ if (Indices.empty())
+ return false;
// First index must be constant zero (array base)
- if (!isa<ConstantInt>(GEP.getOperand(1)) ||
- !cast<ConstantInt>(GEP.getOperand(1))->isZero())
+ if (!isa<ConstantInt>(Indices[0]) || !cast<ConstantInt>(Indices[0])->isZero())
return false;
+ unsigned NumDims = Indices.size() - 1;
+
// Limit lowering for arrays with 3 or more dimensions
- if (NumOps < 5)
+ if (NumDims < 3)
return false;
// Check that it's arrays all the way
Type *CurTy = GEP.getSourceElementType();
unsigned NumVar = 0;
- for (unsigned I = 2; I < NumOps; ++I) {
+ for (unsigned I = 1; I < Indices.size(); ++I) {
auto *ArrTy = dyn_cast<ArrayType>(CurTy);
if (!ArrTy)
return false;
- if (!isa<ConstantInt>(GEP.getOperand(I)))
+ if (!isa<ConstantInt>(Indices[I]))
++NumVar;
CurTy = ArrTy->getElementType();
}
@@ -3032,7 +3067,7 @@ static bool shouldCanonicalizeGEPToPtrAdd(GetElementPtrInst &GEP) {
m_Shl(m_Value(), m_ConstantInt())))))
return true;
-  if (isMultiDimGep(GEP))
+  if (isMultiDimGepFlattenable(GEP))
return true;
// gep (gep %p, C1), %x, C2 is expanded so the two constants can
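For the nested case this patch targets, the input looks roughly like the second example in the doc comment: an outer GEP with exactly one variable index whose pointer operand is another GEP with all-constant indices. A sketch under that assumption; @arr, @nested, and the inner GEP's constant indices are illustrative and not taken from the PR's tests:

  @arr = internal global [9 x [9 x [9 x i32]]] zeroinitializer

  define ptr @nested(i64 %i) {
  entry:
    ; Inner GEP: every index is a constant, so it joins the chain that
    ; isMultiDimGepFlattenable walks while accumulating indices.
    %inner = getelementptr [9 x [9 x [9 x i32]]], ptr @arr, i64 0, i64 1, i64 0, i64 0
    ; Outer GEP: a single variable index; its pointer operand is the GEP above.
    %gep = getelementptr [9 x [9 x [9 x i32]]], ptr %inner, i64 0, i64 %i, i64 2, i64 3
    ret ptr %gep
  }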