[llvm] [InstCombine] Make foldCmpLoadFromIndexedGlobal more resilient to non-array geps. (PR #150639)
David Green via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 31 06:31:00 PDT 2025
https://github.com/davemgreen updated https://github.com/llvm/llvm-project/pull/150639
From a912a7c5bf50ea7d7d42027e12df438a73a4ed4f Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Thu, 31 Jul 2025 14:29:14 +0100
Subject: [PATCH] [InstCombine] Make foldCmpLoadFromIndexedGlobal more
 resilient to non-array geps.
My understanding is that gep [n x i8] and gep i8 can be treated equivalently:
the array type conveys no extra information and could be removed. This patch
goes through foldCmpLoadFromIndexedGlobal and makes it handle non-array gep
types as well, so long as the gep element type still matches the element type
of the array being loaded.
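As a minimal sketch of the equivalence this relies on (the global @G and the two
functions below are invented for illustration and are not part of the patch or
its tests), both geps compute the same address, so the compare should fold the
same way in either form:

  @G = internal constant [4 x i16] [i16 1, i16 2, i16 3, i16 4]

  ; Indexing through the array type, as the fold previously required.
  define i1 @with_array_ty(i32 %x) {
    %p = getelementptr inbounds [4 x i16], ptr @G, i32 0, i32 %x
    %q = load i16, ptr %p
    %r = icmp eq i16 %q, 3
    ret i1 %r
  }

  ; Indexing with the scalar element type; same address, now also handled.
  define i1 @without_array_ty(i32 %x) {
    %p = getelementptr inbounds i16, ptr @G, i32 %x
    %q = load i16, ptr %p
    %r = icmp eq i16 %q, 3
    ret i1 %r
  }

(This assumes a data layout whose pointer index type is i32, as in the existing
load-cmp.ll tests; otherwise the fold waits for the index to be canonicalized.)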
---
.../InstCombine/InstCombineCompares.cpp | 29 ++++++++++++-----
llvm/test/Transforms/InstCombine/load-cmp.ll | 32 ++++++++++++++++++-
2 files changed, 52 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index b268fea85ab07..d4f83ece98907 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -113,10 +113,16 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI,
ConstantInt *AndCst) {
if (LI->isVolatile() || LI->getType() != GEP->getResultElementType() ||
- GV->getValueType() != GEP->getSourceElementType() || !GV->isConstant() ||
+ !GV->getValueType()->isArrayTy() || !GV->isConstant() ||
!GV->hasDefinitiveInitializer())
return nullptr;
+ Type *GEPSrcEltTy = GEP->getSourceElementType();
+ if (GEPSrcEltTy->isArrayTy())
+ GEPSrcEltTy = GEPSrcEltTy->getArrayElementType();
+ if (GV->getValueType()->getArrayElementType() != GEPSrcEltTy)
+ return nullptr;
+
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
return nullptr;
@@ -127,12 +133,19 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
return nullptr;
// There are many forms of this optimization we can handle, for now, just do
- // the simple index into a single-dimensional array.
+ // the simple index into a single-dimensional array or elements of equal size.
//
- // Require: GEP GV, 0, i {{, constant indices}}
- if (GEP->getNumOperands() < 3 || !isa<ConstantInt>(GEP->getOperand(1)) ||
- !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
- isa<Constant>(GEP->getOperand(2)))
+ // Require: GEP [n x i8] GV, 0, Idx {{, constant indices}}
+ // Or: GEP i8 GV, Idx
+
+ unsigned GEPIdxOp = 1;
+ if (GEP->getSourceElementType()->isArrayTy()) {
+ GEPIdxOp = 2;
+ if (!match(GEP->getOperand(1), m_ZeroInt()))
+ return nullptr;
+ }
+ if (GEP->getNumOperands() < GEPIdxOp + 1 ||
+ isa<Constant>(GEP->getOperand(GEPIdxOp)))
return nullptr;
// Check that indices after the variable are constants and in-range for the
@@ -141,7 +154,7 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
SmallVector<unsigned, 4> LaterIndices;
Type *EltTy = Init->getType()->getArrayElementType();
- for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
+ for (unsigned i = GEPIdxOp + 1, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (!Idx)
return nullptr; // Variable index.
@@ -163,7 +176,7 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
LaterIndices.push_back(IdxVal);
}
- Value *Idx = GEP->getOperand(2);
+ Value *Idx = GEP->getOperand(GEPIdxOp);
// If the index type is non-canonical, wait for it to be canonicalized.
if (Idx->getType() != DL.getIndexType(GEP->getType()))
return nullptr;
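In concrete terms (again an illustrative sketch with invented names and the same
32-bit index assumption as above, not code taken from the patch or its tests),
a scalar-typed gep is now accepted when its source element type matches the
element type of the constant array, while a mismatched element type still makes
the function bail out:

  @A = internal constant [4 x i32] [i32 1, i32 4, i32 9, i32 16]

  ; Now handled: the gep element type i32 matches the i32 elements of @A.
  define i1 @fold_ok(i32 %x) {
    %p = getelementptr inbounds i32, ptr @A, i32 %x
    %q = load i32, ptr %p
    %r = icmp eq i32 %q, 9
    ret i1 %r
  }

  ; Still rejected by the new element-type check: the gep element type i16 does
  ; not match the i32 elements of @A, so foldCmpLoadFromIndexedGlobal returns
  ; nullptr.
  define i1 @no_fold(i32 %x) {
    %p = getelementptr inbounds i16, ptr @A, i32 %x
    %q = load i16, ptr %p
    %r = icmp eq i16 %q, 9
    ret i1 %r
  }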
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index ccaf31f3084d6..d2f886afce34b 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -68,7 +68,6 @@ define i1 @test1_noinbounds_as1(i32 %x) {
%q = load i16, ptr addrspace(1) %p
%r = icmp eq i16 %q, 0
ret i1 %r
-
}
define i1 @test1_noinbounds_as2(i64 %x) {
@@ -81,7 +80,17 @@ define i1 @test1_noinbounds_as2(i64 %x) {
%q = load i16, ptr addrspace(2) %p
%r = icmp eq i16 %q, 0
ret i1 %r
+}
+define i1 @test1_noarrayty(i32 %X) {
+; CHECK-LABEL: @test1_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %P = getelementptr inbounds i16, ptr @G16, i32 %X
+ %Q = load i16, ptr %P
+ %R = icmp eq i16 %Q, 0
+ ret i1 %R
}
define i1 @test2(i32 %X) {
@@ -104,7 +113,17 @@ define i1 @test3(i32 %X) {
%Q = load double, ptr %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
+}
+define i1 @test3_noarrayty(i32 %X) {
+; CHECK-LABEL: @test3_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 1
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %P = getelementptr inbounds double, ptr @GD, i32 %X
+ %Q = load double, ptr %P
+ %R = fcmp oeq double %Q, 1.0
+ ret i1 %R
}
define i1 @test4(i32 %X) {
@@ -326,6 +345,17 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
ret i1 %r
}
+define i1 @test10_struct_arr_noarrayty(i32 %x) {
+; CHECK-LABEL: @test10_struct_arr_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[X:%.*]], 1
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %p = getelementptr inbounds %Foo, ptr @GStructArr, i32 %x, i32 2
+ %q = load i32, ptr %p
+ %r = icmp eq i32 %q, 9
+ ret i1 %r
+}
+
@table = internal constant [2 x ptr] [ptr @g, ptr getelementptr (i8, ptr @g, i64 4)], align 16
@g = external global [2 x i32]