[llvm] d9971be - [InstCombine] Make foldCmpLoadFromIndexedGlobal more resilient to non-array geps. (#150639)
Author: David Green
Date: 2025-08-03T10:19:42+01:00
New Revision: d9971be83e5d23631b04b3ce40bfb2be01b6085e
URL: https://github.com/llvm/llvm-project/commit/d9971be83e5d23631b04b3ce40bfb2be01b6085e
DIFF: https://github.com/llvm/llvm-project/commit/d9971be83e5d23631b04b3ce40bfb2be01b6085e.diff
LOG: [InstCombine] Make foldCmpLoadFromIndexedGlobal more resilient to non-array geps. (#150639)
My understanding is that gep [n x i8] and gep i8 can be treated
equivalently - the array type conveys no extra information and could be
removed. This patch goes through foldCmpLoadFromIndexedGlobal and makes
it work for non-array gep source types, so long as the gep's element
type still matches the element type of the array being loaded.
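For illustration, here is a minimal IR sketch (hypothetical global @tbl and
function names, not taken from the patch) of the two gep forms involved; both
compute the same address into the constant array:

  ; hypothetical example, not part of the patch
  @tbl = internal constant [4 x i16] [i16 0, i16 9, i16 4, i16 0]

  define i1 @cmp_array_gep(i32 %x) {
    ; array-typed gep: gep [4 x i16] @tbl, 0, %x
    %p = getelementptr inbounds [4 x i16], ptr @tbl, i32 0, i32 %x
    %q = load i16, ptr %p
    %r = icmp eq i16 %q, 0
    ret i1 %r
  }

  define i1 @cmp_scalar_gep(i32 %x) {
    ; same address as above, but the gep is typed on the element, not the array
    %p = getelementptr inbounds i16, ptr @tbl, i32 %x
    %q = load i16, ptr %p
    %r = icmp eq i16 %q, 0
    ret i1 %r
  }

With this change the second form should be foldable in the same way as the
first, which is what the new *_noarrayty tests in load-cmp.ll check.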
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/test/Transforms/InstCombine/load-cmp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index b268fea85ab07..d4f83ece98907 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -113,10 +113,16 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI,
ConstantInt *AndCst) {
if (LI->isVolatile() || LI->getType() != GEP->getResultElementType() ||
- GV->getValueType() != GEP->getSourceElementType() || !GV->isConstant() ||
+ !GV->getValueType()->isArrayTy() || !GV->isConstant() ||
!GV->hasDefinitiveInitializer())
return nullptr;
+ Type *GEPSrcEltTy = GEP->getSourceElementType();
+ if (GEPSrcEltTy->isArrayTy())
+ GEPSrcEltTy = GEPSrcEltTy->getArrayElementType();
+ if (GV->getValueType()->getArrayElementType() != GEPSrcEltTy)
+ return nullptr;
+
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
return nullptr;
@@ -127,12 +133,19 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
return nullptr;
// There are many forms of this optimization we can handle, for now, just do
- // the simple index into a single-dimensional array.
+ // the simple index into a single-dimensional array or elements of equal size.
//
- // Require: GEP GV, 0, i {{, constant indices}}
- if (GEP->getNumOperands() < 3 || !isa<ConstantInt>(GEP->getOperand(1)) ||
- !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
- isa<Constant>(GEP->getOperand(2)))
+ // Require: GEP [n x i8] GV, 0, Idx {{, constant indices}}
+ // Or: GEP i8 GV, Idx
+
+ unsigned GEPIdxOp = 1;
+ if (GEP->getSourceElementType()->isArrayTy()) {
+ GEPIdxOp = 2;
+ if (!match(GEP->getOperand(1), m_ZeroInt()))
+ return nullptr;
+ }
+ if (GEP->getNumOperands() < GEPIdxOp + 1 ||
+ isa<Constant>(GEP->getOperand(GEPIdxOp)))
return nullptr;
// Check that indices after the variable are constants and in-range for the
@@ -141,7 +154,7 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
SmallVector<unsigned, 4> LaterIndices;
Type *EltTy = Init->getType()->getArrayElementType();
- for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
+ for (unsigned i = GEPIdxOp + 1, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (!Idx)
return nullptr; // Variable index.
@@ -163,7 +176,7 @@ Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
LaterIndices.push_back(IdxVal);
}
- Value *Idx = GEP->getOperand(2);
+ Value *Idx = GEP->getOperand(GEPIdxOp);
// If the index type is non-canonical, wait for it to be canonicalized.
if (Idx->getType() != DL.getIndexType(GEP->getType()))
return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll
index df34e7d58bcba..f44d27c691b5f 100644
--- a/llvm/test/Transforms/InstCombine/load-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/load-cmp.ll
@@ -68,7 +68,6 @@ define i1 @test1_noinbounds_as1(i32 %x) {
%q = load i16, ptr addrspace(1) %p
%r = icmp eq i16 %q, 0
ret i1 %r
-
}
define i1 @test1_noinbounds_as2(i64 %x) {
@@ -81,7 +80,17 @@ define i1 @test1_noinbounds_as2(i64 %x) {
%q = load i16, ptr addrspace(2) %p
%r = icmp eq i16 %q, 0
ret i1 %r
+}
+define i1 @test1_noarrayty(i32 %X) {
+; CHECK-LABEL: @test1_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 9
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %P = getelementptr inbounds i16, ptr @G16, i32 %X
+ %Q = load i16, ptr %P
+ %R = icmp eq i16 %Q, 0
+ ret i1 %R
}
define i1 @test2(i32 %X) {
@@ -104,7 +113,17 @@ define i1 @test3(i32 %X) {
%Q = load double, ptr %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
+}
+define i1 @test3_noarrayty(i32 %X) {
+; CHECK-LABEL: @test3_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], 1
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %P = getelementptr inbounds double, ptr @GD, i32 %X
+ %Q = load double, ptr %P
+ %R = fcmp oeq double %Q, 1.0
+ ret i1 %R
}
define i1 @test4(i32 %X) {
@@ -325,6 +344,17 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
ret i1 %r
}
+define i1 @test10_struct_arr_noarrayty(i32 %x) {
+; CHECK-LABEL: @test10_struct_arr_noarrayty(
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[X:%.*]], 1
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %p = getelementptr inbounds %Foo, ptr @GStructArr, i32 %x, i32 2
+ %q = load i32, ptr %p
+ %r = icmp eq i32 %q, 9
+ ret i1 %r
+}
+
@table = internal constant [2 x ptr] [ptr @g, ptr getelementptr (i8, ptr @g, i64 4)], align 16
@g = external global [2 x i32]