[llvm] [InstCombine] Fix #163110: Fold icmp (shl X, L), (add (shl Y, L), 1<<L) to icmp X, (Y + 1) (PR #165975)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 11 09:53:32 PST 2026
- Previous message: [llvm] [InstCombine] Fix #163110: Fold icmp (shl X, L), (add (shl Y, L), 1<<L) to icmp X, (Y + 1) (PR #165975)
- Next message: [llvm] [InstCombine] Fix #163110: Fold icmp (shl X, L), (add (shl Y, L), 1<<L) to icmp X, (Y + 1) (PR #165975)
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
https://github.com/Michael-Chen-NJU updated https://github.com/llvm/llvm-project/pull/165975
>From 3eacc3647f45513cfb34932fe943c91dce42f5c8 Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Sat, 1 Nov 2025 14:58:47 +0800
Subject: [PATCH 1/7] [InstCombine] Add test case for fold (X << 5) == ((Y <<
5) + 32)
---
.../InstCombine/icmp-shl-add-to-add.ll | 112 ++++++++++++++++++
1 file changed, 112 insertions(+)
create mode 100644 llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
new file mode 100644
index 0000000000000..95dda32394c58
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+; Test case: Fold (X << 5) == ((Y << 5) + 32) into X == (Y + 1).
+; See the Alive2 proof referenced in PR #165975.
+
+define i1 @shl_add_const_eq_base(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_base(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i64 %v0, 5
+ %v4 = shl nsw i64 %v3, 5
+ %v5 = add nsw i64 %v4, 32
+ %v6 = icmp eq i64 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: icmp ne
+define i1 @shl_add_const_ne(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_ne(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
+; CHECK-NEXT: [[V6:%.*]] = icmp ne i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i64 %v0, 5
+ %v4 = shl nsw i64 %v3, 5
+ %v5 = add nsw i64 %v4, 32
+ %v6 = icmp ne i64 %v1, %v5 ; Note: icmp ne
+ ret i1 %v6
+}
+
+; Test: shl amounts do not match (5 vs 4).
+define i1 @shl_add_const_eq_mismatch_shl_amt(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_mismatch_shl_amt(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 4
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 16
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i64 %v0, 5
+ %v4 = shl nsw i64 %v3, 4 ; Shift amount mismatch
+ %v5 = add nsw i64 %v4, 16
+ %v6 = icmp eq i64 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: Constant is wrong (32 vs 64).
+define i1 @shl_add_const_eq_wrong_constant(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_wrong_constant(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 64
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i64 %v0, 5
+ %v4 = shl nsw i64 %v3, 5
+ %v5 = add nsw i64 %v4, 64 ; Constant mismatch
+ %v6 = icmp eq i64 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: Missing NSW flag on one of the shl instructions.
+define i1 @shl_add_const_eq_no_nsw_on_v1(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_no_nsw_on_v1(
+; CHECK-NEXT: [[V1:%.*]] = shl i64 %v0, 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl i64 %v0, 5 ; Missing nsw
+ %v4 = shl nsw i64 %v3, 5
+ %v5 = add nsw i64 %v4, 32
+ %v6 = icmp eq i64 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: Lower bit width (i8) and different shift amount (3). Constant is 8.
+define i1 @shl_add_const_eq_i8(i8 %v0, i8 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_i8(
+; CHECK-NEXT: [[V7:%.*]] = add nsw i8 %v3, 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i8 %v0, [[V7]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i8 %v0, 3
+ %v4 = shl nsw i8 %v3, 3
+ %v5 = add nsw i8 %v4, 8 ; 2^3 = 8
+ %v6 = icmp eq i8 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: i32 bit width and larger shift amount (10). Constant is 1024.
+define i1 @shl_add_const_eq_i32(i32 %v0, i32 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_i32(
+; CHECK-NEXT: [[V7:%.*]] = add nsw i32 %v3, 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i32 %v0, [[V7]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i32 %v0, 10
+ %v4 = shl nsw i32 %v3, 10
+ %v5 = add nsw i32 %v4, 1024 ; 2^10 = 1024
+ %v6 = icmp eq i32 %v1, %v5
+ ret i1 %v6
+}
>From cb022137e9459586a606b22d1f3d7d2bf8ac81f8 Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Sat, 1 Nov 2025 15:19:28 +0800
Subject: [PATCH 2/7] [InstCombine] Optimize icmp with shl and add by folding
(X << Log2) == ((Y << Log2) + K) into X == (Y + 1)
---
.../InstCombine/InstCombineCompares.cpp | 20 ++++++++++++
.../InstCombine/icmp-shl-add-to-add.ll | 32 ++++++++-----------
2 files changed, 34 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index fba1ccf2c8c9b..28d3c772acdcc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6001,6 +6001,26 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
const CmpInst::Predicate Pred = I.getPredicate();
+
+ //icmp (shl nsw X, Log2), (add nsw (shl nsw Y, Log2), K) -> icmp X, (add nsw Y, 1)
+ Value *X, *Y;
+ ConstantInt *CLog2M0, *CLog2M1, *CVal;
+ auto M0 = m_NSWShl(m_Value(X), m_ConstantInt(CLog2M0));
+ auto M1 = m_NSWAdd(m_NSWShl (m_Value(Y), m_ConstantInt(CLog2M1)),
+ m_ConstantInt(CVal));
+
+ if (match(&I, m_c_ICmp(M0, M1)) && CLog2M0->getValue() == CLog2M1->getValue()) {
+ unsigned BitWidth = CLog2M0->getBitWidth();
+ unsigned ShAmt = (unsigned)CLog2M0->getLimitedValue(BitWidth);
+ APInt ExpectedK = APInt::getOneBitSet(BitWidth, ShAmt);
+ if (CVal->getValue() == ExpectedK) {
+ Value *NewRHS = Builder.CreateAdd(
+ Y, ConstantInt::get(Y->getType(), 1),
+ "", /*HasNUW=*/false, /*HasNSW=*/true);
+ return new ICmpInst(Pred, X, NewRHS);
+ }
+ }
+
Value *A, *B, *C, *D;
if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
index 95dda32394c58..0f375a05528a2 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -6,10 +6,8 @@
define i1 @shl_add_const_eq_base(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_base(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
-; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
-; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V3:%.*]], 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i64 %v0, 5
@@ -22,10 +20,8 @@ define i1 @shl_add_const_eq_base(i64 %v0, i64 %v3) {
; Test: icmp ne
define i1 @shl_add_const_ne(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_ne(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
-; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
-; CHECK-NEXT: [[V6:%.*]] = icmp ne i64 [[V1]], [[V5]]
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V3:%.*]], 1
+; CHECK-NEXT: [[V6:%.*]] = icmp ne i64 [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i64 %v0, 5
@@ -38,8 +34,8 @@ define i1 @shl_add_const_ne(i64 %v0, i64 %v3) {
; Test: shl amounts do not match (5 vs 4).
define i1 @shl_add_const_eq_mismatch_shl_amt(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_mismatch_shl_amt(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 4
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 4
; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 16
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
@@ -54,8 +50,8 @@ define i1 @shl_add_const_eq_mismatch_shl_amt(i64 %v0, i64 %v3) {
; Test: Constant is wrong (32 vs 64).
define i1 @shl_add_const_eq_wrong_constant(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_wrong_constant(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 %v0, 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 64
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
@@ -70,8 +66,8 @@ define i1 @shl_add_const_eq_wrong_constant(i64 %v0, i64 %v3) {
; Test: Missing NSW flag on one of the shl instructions.
define i1 @shl_add_const_eq_no_nsw_on_v1(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_no_nsw_on_v1(
-; CHECK-NEXT: [[V1:%.*]] = shl i64 %v0, 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 %v3, 5
+; CHECK-NEXT: [[V1:%.*]] = shl i64 [[V0:%.*]], 5
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
@@ -86,8 +82,8 @@ define i1 @shl_add_const_eq_no_nsw_on_v1(i64 %v0, i64 %v3) {
; Test: Lower bit width (i8) and different shift amount (3). Constant is 8.
define i1 @shl_add_const_eq_i8(i8 %v0, i8 %v3) {
; CHECK-LABEL: @shl_add_const_eq_i8(
-; CHECK-NEXT: [[V7:%.*]] = add nsw i8 %v3, 1
-; CHECK-NEXT: [[V6:%.*]] = icmp eq i8 %v0, [[V7]]
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i8 [[V3:%.*]], 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i8 [[V0:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i8 %v0, 3
@@ -100,8 +96,8 @@ define i1 @shl_add_const_eq_i8(i8 %v0, i8 %v3) {
; Test: i32 bit width and larger shift amount (10). Constant is 1024.
define i1 @shl_add_const_eq_i32(i32 %v0, i32 %v3) {
; CHECK-LABEL: @shl_add_const_eq_i32(
-; CHECK-NEXT: [[V7:%.*]] = add nsw i32 %v3, 1
-; CHECK-NEXT: [[V6:%.*]] = icmp eq i32 %v0, [[V7]]
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[V3:%.*]], 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i32 [[V0:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i32 %v0, 10
>From c32b54fc00827a65085f0e5eab67832d1b1cad63 Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Sat, 1 Nov 2025 18:45:17 +0800
Subject: [PATCH 3/7] [InstCombine] Add tests for multi-use cases and vector
operations in icmp-shl-add optimization
---
.../InstCombine/icmp-shl-add-to-add.ll | 69 +++++++++++++++++++
1 file changed, 69 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
index 0f375a05528a2..f0f3ff0b9b2ef 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -4,6 +4,8 @@
; Test case: Fold (X << 5) == ((Y << 5) + 32) into X == (Y + 1).
; This corresponds to the provided alive2 proof.
+declare void @use_i64(i64)
+
define i1 @shl_add_const_eq_base(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_base(
; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V3:%.*]], 1
@@ -106,3 +108,70 @@ define i1 @shl_add_const_eq_i32(i32 %v0, i32 %v3) {
%v6 = icmp eq i32 %v1, %v5
ret i1 %v6
}
+
+; Test: Multi-use case. The optimization should still occur if applicable,
+; but the extraneous call must be preserved.
+define i1 @shl_add_const_eq_multi_use(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_multi_use(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
+; CHECK-NEXT: call void @use_i64(i64 [[V1]])
+; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: ret i1 [[V6]]
+;
+ %v1 = shl nsw i64 %v0, 5
+ call void @use_i64(i64 %v1) ; Additional use of v1
+ %v4 = shl nsw i64 %v3, 5
+ %v5 = add nsw i64 %v4, 32
+ %v6 = icmp eq i64 %v1, %v5
+ ret i1 %v6
+}
+
+; Test: Vector splat. Should fold once optimization is applied.
+define <2 x i1> @shl_add_const_eq_vec_splat(<2 x i64> %v0, <2 x i64> %v3) {
+; CHECK-LABEL: @shl_add_const_eq_vec_splat(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw <2 x i64> [[V0:%.*]], <i64 5, i64 5>
+; CHECK-NEXT: [[V4:%.*]] = shl nsw <2 x i64> [[V3:%.*]], <i64 5, i64 5>
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V4]], <i64 32, i64 32>
+; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1]], [[V5]]
+; CHECK-NEXT: ret <2 x i1> [[V6]]
+;
+ %v1 = shl nsw <2 x i64> %v0, <i64 5, i64 5>
+ %v4 = shl nsw <2 x i64> %v3, <i64 5, i64 5>
+ %v5 = add nsw <2 x i64> %v4, <i64 32, i64 32>
+ %v6 = icmp eq <2 x i64> %v1, %v5
+ ret <2 x i1> %v6
+}
+
+; Test: Vector splat with poison. Should fold once optimization is applied.
+define <2 x i1> @shl_add_const_eq_vec_splat_poison(<2 x i64> %v0, <2 x i64> %v3) {
+; CHECK-LABEL: @shl_add_const_eq_vec_splat_poison(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw <2 x i64> [[V0:%.*]], <i64 5, i64 5>
+; CHECK-NEXT: [[V4:%.*]] = shl nsw <2 x i64> [[V3:%.*]], <i64 5, i64 5>
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V4]], <i64 32, i64 poison>
+; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1]], [[V5]]
+; CHECK-NEXT: ret <2 x i1> [[V6]]
+;
+ %v1 = shl nsw <2 x i64> %v0, <i64 5, i64 5>
+ %v4 = shl nsw <2 x i64> %v3, <i64 5, i64 5>
+ %v5 = add nsw <2 x i64> %v4, <i64 32, i64 poison>
+ %v6 = icmp eq <2 x i64> %v1, %v5
+ ret <2 x i1> %v6
+}
+
+; Test: Vector non-splat (should not fold).
+define <2 x i1> @shl_add_const_eq_vec_non_splat(<2 x i64> %v0, <2 x i64> %v3) {
+; CHECK-LABEL: @shl_add_const_eq_vec_non_splat(
+; CHECK-NEXT: [[V1:%.*]] = shl nsw <2 x i64> [[V0:%.*]], <i64 5, i64 6>
+; CHECK-NEXT: [[V4:%.*]] = shl nsw <2 x i64> [[V3:%.*]], <i64 5, i64 6>
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V4]], <i64 32, i64 64>
+; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1]], [[V5]]
+; CHECK-NEXT: ret <2 x i1> [[V6]]
+;
+ %v1 = shl nsw <2 x i64> %v0, <i64 5, i64 6>
+ %v4 = shl nsw <2 x i64> %v3, <i64 5, i64 6>
+ %v5 = add nsw <2 x i64> %v4, <i64 32, i64 64>
+ %v6 = icmp eq <2 x i64> %v1, %v5
+ ret <2 x i1> %v6
+}
>From 6dc8405a47d4664c2ed345ee7b30e3f61938c4ae Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Sat, 1 Nov 2025 19:22:15 +0800
Subject: [PATCH 4/7] [InstCombine] Refactor icmp folding logic for shl and add
operations with APInt
---
.../InstCombine/InstCombineCompares.cpp | 22 +++++++++----------
.../InstCombine/icmp-shl-add-to-add.ll | 17 +++++---------
2 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 28d3c772acdcc..734d27c70705e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6002,25 +6002,25 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
const CmpInst::Predicate Pred = I.getPredicate();
- //icmp (shl nsw X, Log2), (add nsw (shl nsw Y, Log2), K) -> icmp X, (add nsw Y, 1)
+ // icmp (shl nsw X, Log2), (add nsw (shl nsw Y, Log2), K) -> icmp X, (add nsw
+ // Y, 1)
Value *X, *Y;
- ConstantInt *CLog2M0, *CLog2M1, *CVal;
- auto M0 = m_NSWShl(m_Value(X), m_ConstantInt(CLog2M0));
- auto M1 = m_NSWAdd(m_NSWShl (m_Value(Y), m_ConstantInt(CLog2M1)),
- m_ConstantInt(CVal));
+ const APInt *CLog2M0, *CLog2M1, *CVal;
+ auto M0 = m_NSWShl(m_Value(X), m_APIntAllowPoison(CLog2M0));
+ auto M1 = m_NSWAdd(m_NSWShl(m_Value(Y), m_APIntAllowPoison(CLog2M1)),
+ m_APIntAllowPoison(CVal));
- if (match(&I, m_c_ICmp(M0, M1)) && CLog2M0->getValue() == CLog2M1->getValue()) {
+ if (match(&I, m_c_ICmp(M0, M1)) && *CLog2M0 == *CLog2M1) {
unsigned BitWidth = CLog2M0->getBitWidth();
unsigned ShAmt = (unsigned)CLog2M0->getLimitedValue(BitWidth);
APInt ExpectedK = APInt::getOneBitSet(BitWidth, ShAmt);
- if (CVal->getValue() == ExpectedK) {
- Value *NewRHS = Builder.CreateAdd(
- Y, ConstantInt::get(Y->getType(), 1),
- "", /*HasNUW=*/false, /*HasNSW=*/true);
+ if (*CVal == ExpectedK) {
+ Value *NewRHS = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1),
+ "", /*HasNUW=*/false, /*HasNSW=*/true);
return new ICmpInst(Pred, X, NewRHS);
}
}
-
+
Value *A, *B, *C, *D;
if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
index f0f3ff0b9b2ef..1523b283b8b08 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -115,9 +115,8 @@ define i1 @shl_add_const_eq_multi_use(i64 %v0, i64 %v3) {
; CHECK-LABEL: @shl_add_const_eq_multi_use(
; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
; CHECK-NEXT: call void @use_i64(i64 [[V1]])
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
-; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 32
-; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[V3:%.*]], 1
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V0]], [[TMP1]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i64 %v0, 5
@@ -131,10 +130,8 @@ define i1 @shl_add_const_eq_multi_use(i64 %v0, i64 %v3) {
; Test: Vector splat. Should fold once optimization is applied.
define <2 x i1> @shl_add_const_eq_vec_splat(<2 x i64> %v0, <2 x i64> %v3) {
; CHECK-LABEL: @shl_add_const_eq_vec_splat(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw <2 x i64> [[V0:%.*]], <i64 5, i64 5>
-; CHECK-NEXT: [[V4:%.*]] = shl nsw <2 x i64> [[V3:%.*]], <i64 5, i64 5>
-; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V4]], <i64 32, i64 32>
-; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1]], [[V5]]
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V3:%.*]], splat (i64 1)
+; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret <2 x i1> [[V6]]
;
%v1 = shl nsw <2 x i64> %v0, <i64 5, i64 5>
@@ -147,10 +144,8 @@ define <2 x i1> @shl_add_const_eq_vec_splat(<2 x i64> %v0, <2 x i64> %v3) {
; Test: Vector splat with poison. Should fold once optimization is applied.
define <2 x i1> @shl_add_const_eq_vec_splat_poison(<2 x i64> %v0, <2 x i64> %v3) {
; CHECK-LABEL: @shl_add_const_eq_vec_splat_poison(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw <2 x i64> [[V0:%.*]], <i64 5, i64 5>
-; CHECK-NEXT: [[V4:%.*]] = shl nsw <2 x i64> [[V3:%.*]], <i64 5, i64 5>
-; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V4]], <i64 32, i64 poison>
-; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1]], [[V5]]
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V3:%.*]], splat (i64 1)
+; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret <2 x i1> [[V6]]
;
%v1 = shl nsw <2 x i64> %v0, <i64 5, i64 5>
>From 2c8b91de405cc7cd000286dcd19c24385f1d2db8 Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Mon, 8 Dec 2025 13:18:15 +0800
Subject: [PATCH 5/7] [InstCombine] Fold icmp (shl nsw X, L), (add nsw (shl nsw
Y, L), K).
---
.../Transforms/InstCombine/InstCombineCompares.cpp | 11 ++++++-----
.../Transforms/InstCombine/icmp-shl-add-to-add.ll | 12 +++++-------
2 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e1404d50c8a75..8f4f7220aa0fc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6024,8 +6024,9 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
const CmpInst::Predicate Pred = I.getPredicate();
- // icmp (shl nsw X, Log2), (add nsw (shl nsw Y, Log2), K) -> icmp X, (add nsw
- // Y, 1)
+ // icmp (shl nsw X, L), (add nsw (shl nsw Y, L), K) where K is a multiple of
+ // 2^L
+ // -> icmp X, (add nsw Y, K / 2^L)
Value *X, *Y;
const APInt *CLog2M0, *CLog2M1, *CVal;
auto M0 = m_NSWShl(m_Value(X), m_APIntAllowPoison(CLog2M0));
@@ -6035,9 +6036,9 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
if (match(&I, m_c_ICmp(M0, M1)) && *CLog2M0 == *CLog2M1) {
unsigned BitWidth = CLog2M0->getBitWidth();
unsigned ShAmt = (unsigned)CLog2M0->getLimitedValue(BitWidth);
- APInt ExpectedK = APInt::getOneBitSet(BitWidth, ShAmt);
- if (*CVal == ExpectedK) {
- Value *NewRHS = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1),
+ if (CVal->countr_zero() >= ShAmt) {
+ APInt NewK = CVal->lshr(ShAmt);
+ Value *NewRHS = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), NewK),
"", /*HasNUW=*/false, /*HasNSW=*/true);
return new ICmpInst(Pred, X, NewRHS);
}
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
index 1523b283b8b08..52dc3b0f5376a 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -49,13 +49,11 @@ define i1 @shl_add_const_eq_mismatch_shl_amt(i64 %v0, i64 %v3) {
ret i1 %v6
}
-; Test: Constant is wrong (32 vs 64).
-define i1 @shl_add_const_eq_wrong_constant(i64 %v0, i64 %v3) {
-; CHECK-LABEL: @shl_add_const_eq_wrong_constant(
-; CHECK-NEXT: [[V1:%.*]] = shl nsw i64 [[V0:%.*]], 5
-; CHECK-NEXT: [[V4:%.*]] = shl nsw i64 [[V3:%.*]], 5
-; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V4]], 64
-; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1]], [[V5]]
+; Test: Constant K is a multiple of 2^L (64 vs 32). Should simplify to K/2^L = 2.
+define i1 @shl_add_const_eq_k_multiple_of_pow2(i64 %v0, i64 %v3) {
+; CHECK-LABEL: @shl_add_const_eq_k_multiple_of_pow2(
+; CHECK-NEXT: [[V5:%.*]] = add nsw i64 [[V3:%.*]], 2
+; CHECK-NEXT: [[V6:%.*]] = icmp eq i64 [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret i1 [[V6]]
;
%v1 = shl nsw i64 %v0, 5
>From 64addefd3e337e349018c3fce55f66a8edc98b45 Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Mon, 12 Jan 2026 01:32:47 +0800
Subject: [PATCH 6/7] using canEvaluateShifted & getShiftedValue
---
.../InstCombine/InstCombineCompares.cpp | 22 ++---
.../InstCombine/InstCombineInternal.h | 4 +
.../InstCombine/InstCombineShifts.cpp | 82 ++++++++++++-------
3 files changed, 65 insertions(+), 43 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 8f4f7220aa0fc..e8327421973b0 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -6027,20 +6027,14 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
// icmp (shl nsw X, L), (add nsw (shl nsw Y, L), K) where K is a multiple of
// 2^L
// -> icmp X, (add nsw Y, K / 2^L)
- Value *X, *Y;
- const APInt *CLog2M0, *CLog2M1, *CVal;
- auto M0 = m_NSWShl(m_Value(X), m_APIntAllowPoison(CLog2M0));
- auto M1 = m_NSWAdd(m_NSWShl(m_Value(Y), m_APIntAllowPoison(CLog2M1)),
- m_APIntAllowPoison(CVal));
-
- if (match(&I, m_c_ICmp(M0, M1)) && *CLog2M0 == *CLog2M1) {
- unsigned BitWidth = CLog2M0->getBitWidth();
- unsigned ShAmt = (unsigned)CLog2M0->getLimitedValue(BitWidth);
- if (CVal->countr_zero() >= ShAmt) {
- APInt NewK = CVal->lshr(ShAmt);
- Value *NewRHS = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), NewK),
- "", /*HasNUW=*/false, /*HasNSW=*/true);
- return new ICmpInst(Pred, X, NewRHS);
+ Value *X;
+ const APInt *CShAmt;
+ if (match(Op0, m_NSWShl(m_Value(X), m_APIntAllowPoison(CShAmt)))) {
+ unsigned ShAmt = CShAmt->getZExtValue();
+ if (canEvaluateShifted(Op1, ShAmt, false, &I)) {
+ Value *NewOp0 = X;
+ Value *NewOp1 = getShiftedValue(Op1, ShAmt, false);
+ return new ICmpInst(Pred, NewOp0, NewOp1);
}
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 9bdd8cb71f7f3..dd4138b55421f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -448,6 +448,10 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
bool InvertFalseVal = false);
Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);
+ bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
+ Instruction *CxtI);
+ Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift);
+
Instruction *foldLShrOverflowBit(BinaryOperator &I);
Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 899a3c16554c9..d7624be48314a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -584,8 +584,8 @@ static bool canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl,
/// %F = lshr i128 %E, 64
/// where the client will ask if E can be computed shifted right by 64-bits. If
/// this succeeds, getShiftedValue() will be called to produce the value.
-static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
- InstCombinerImpl &IC, Instruction *CxtI) {
+bool InstCombinerImpl::canEvaluateShifted(Value *V, unsigned NumBits,
+ bool IsLeftShift, Instruction *CxtI) {
// We can always evaluate immediate constants.
if (match(V, m_ImmConstant()))
return true;
@@ -603,19 +603,19 @@ static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
case Instruction::Or:
case Instruction::Xor:
// Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
- return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&
- canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);
+ return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, I) &&
+ canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, I);
case Instruction::Shl:
case Instruction::LShr:
- return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);
+ return canEvaluateShiftedShift(NumBits, IsLeftShift, I, *this, CxtI);
case Instruction::Select: {
SelectInst *SI = cast<SelectInst>(I);
Value *TrueVal = SI->getTrueValue();
Value *FalseVal = SI->getFalseValue();
- return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&
- canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);
+ return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, SI) &&
+ canEvaluateShifted(FalseVal, NumBits, IsLeftShift, SI);
}
case Instruction::PHI: {
// We can change a phi if we can change all operands. Note that we never
@@ -623,7 +623,7 @@ static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
// instructions with a single use.
PHINode *PN = cast<PHINode>(I);
for (Value *IncValue : PN->incoming_values())
- if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))
+ if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, PN))
return false;
return true;
}
@@ -633,6 +633,21 @@ static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
return !IsLeftShift && match(I->getOperand(1), m_APInt(MulConst)) &&
MulConst->isNegatedPowerOf2() && MulConst->countr_zero() == NumBits;
}
+ case Instruction::Add: {
+ bool CanHandleAdd = false;
+ if (IsLeftShift)
+ CanHandleAdd = cast<BinaryOperator>(I)->hasNoSignedWrap();
+ else {
+ const APInt *C;
+ if (match(I->getOperand(1), m_APIntAllowPoison(C)))
+ CanHandleAdd = C->countr_zero() >= NumBits;
+ }
+ if (CanHandleAdd)
+ return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, I) &&
+ canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, I);
+
+ return false;
+ }
}
}
@@ -701,18 +716,18 @@ static Value *foldShiftedShift(BinaryOperator *InnerShift, unsigned OuterShAmt,
/// When canEvaluateShifted() returns true for an expression, this function
/// inserts the new computation that produces the shifted value.
-static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
- InstCombinerImpl &IC, const DataLayout &DL) {
+Value *InstCombinerImpl::getShiftedValue(Value *V, unsigned NumBits,
+ bool isLeftShift) {
// We can always evaluate constants shifted.
if (Constant *C = dyn_cast<Constant>(V)) {
if (isLeftShift)
- return IC.Builder.CreateShl(C, NumBits);
+ return Builder.CreateShl(C, NumBits);
else
- return IC.Builder.CreateLShr(C, NumBits);
+      return Builder.CreateLShr(C, NumBits); // must stay logical: FoldShiftByConstant's lshr folds rely on it
}
Instruction *I = cast<Instruction>(V);
- IC.addToWorklist(I);
+ addToWorklist(I);
switch (I->getOpcode()) {
default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
@@ -720,22 +735,25 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Or:
case Instruction::Xor:
// Bitwise operators can all arbitrarily be arbitrarily evaluated shifted.
- I->setOperand(
- 0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift, IC, DL));
- I->setOperand(
- 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
+ I->setOperand(0, getShiftedValue(I->getOperand(0), NumBits, isLeftShift));
+ I->setOperand(1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift));
return I;
case Instruction::Shl:
- case Instruction::LShr:
+ case Instruction::LShr: {
+ auto *OBO = dyn_cast<OverflowingBinaryOperator>(I);
+
+    if (!isLeftShift && OBO && OBO->hasNoUnsignedWrap() && // lshr (shl nuw X, N), N == X; nsw alone is unsound (i8: lshr (shl nsw -1, 1), 1 == 127)
+        match(I->getOperand(1), m_SpecificInt(NumBits)))
+ return I->getOperand(0);
+
return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
- IC.Builder);
+ Builder);
+ }
case Instruction::Select:
- I->setOperand(
- 1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift, IC, DL));
- I->setOperand(
- 2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift, IC, DL));
+ I->setOperand(1, getShiftedValue(I->getOperand(1), NumBits, isLeftShift));
+ I->setOperand(2, getShiftedValue(I->getOperand(2), NumBits, isLeftShift));
return I;
case Instruction::PHI: {
// We can change a phi if we can change all operands. Note that we never
@@ -743,20 +761,26 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
// instructions with a single use.
PHINode *PN = cast<PHINode>(I);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- PN->setIncomingValue(i, getShiftedValue(PN->getIncomingValue(i), NumBits,
- isLeftShift, IC, DL));
+ PN->setIncomingValue(
+ i, getShiftedValue(PN->getIncomingValue(i), NumBits, isLeftShift));
return PN;
}
case Instruction::Mul: {
assert(!isLeftShift && "Unexpected shift direction!");
auto *Neg = BinaryOperator::CreateNeg(I->getOperand(0));
- IC.InsertNewInstWith(Neg, I->getIterator());
+ InsertNewInstWith(Neg, I->getIterator());
unsigned TypeWidth = I->getType()->getScalarSizeInBits();
APInt Mask = APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits);
auto *And = BinaryOperator::CreateAnd(Neg,
ConstantInt::get(I->getType(), Mask));
And->takeName(I);
- return IC.InsertNewInstWith(And, I->getIterator());
+ return InsertNewInstWith(And, I->getIterator());
+ }
+ case Instruction::Add: {
+ Value *LHS = getShiftedValue(I->getOperand(0), NumBits, isLeftShift);
+ Value *RHS = getShiftedValue(I->getOperand(1), NumBits, isLeftShift);
+ bool HasNSW = cast<BinaryOperator>(I)->hasNoSignedWrap();
+ return Builder.CreateAdd(LHS, RHS, I->getName(), /*HasNUW=*/false, HasNSW);
}
}
}
@@ -828,14 +852,14 @@ Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *C1,
// See if we can propagate this shift into the input, this covers the trivial
// cast of lshr(shl(x,c1),c2) as well as other more complex cases.
if (I.getOpcode() != Instruction::AShr &&
- canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, *this, &I)) {
+ canEvaluateShifted(Op0, Op1C->getZExtValue(), IsLeftShift, &I)) {
LLVM_DEBUG(
dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: "
<< *Op0 << "\n SH: " << I << "\n");
return replaceInstUsesWith(
- I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift, *this, DL));
+ I, getShiftedValue(Op0, Op1C->getZExtValue(), IsLeftShift));
}
if (Instruction *FoldedShift = foldBinOpIntoSelectOrPhi(I))
>From e7613410cc4c1a7983fcd8288c663d8fa6bea00c Mon Sep 17 00:00:00 2001
From: Michael-Chen-NJU <2802328816 at qq.com>
Date: Mon, 12 Jan 2026 01:35:06 +0800
Subject: [PATCH 7/7] update tests
---
llvm/test/Transforms/InstCombine/apint-shift.ll | 6 +++---
.../Transforms/InstCombine/icmp-shl-add-to-add.ll | 2 +-
llvm/test/Transforms/InstCombine/lshr.ll | 15 ++++++++-------
3 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll
index 4dd0811bb7ecb..d404c5cd344f6 100644
--- a/llvm/test/Transforms/InstCombine/apint-shift.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shift.ll
@@ -538,9 +538,9 @@ define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> %A) {
define i37 @test25(i37 %AA, i37 %BB) {
; CHECK-LABEL: @test25(
; CHECK-NEXT: [[D:%.*]] = and i37 [[AA:%.*]], -131072
-; CHECK-NEXT: [[C2:%.*]] = add i37 [[BB:%.*]], [[D]]
-; CHECK-NEXT: [[F:%.*]] = and i37 [[C2]], -131072
-; CHECK-NEXT: ret i37 [[F]]
+; CHECK-NEXT: [[F:%.*]] = and i37 [[C2:%.*]], -131072
+; CHECK-NEXT: [[E1:%.*]] = add nsw i37 [[F]], [[D]]
+; CHECK-NEXT: ret i37 [[E1]]
;
%C = lshr i37 %BB, 17
%D = lshr i37 %AA, 17
diff --git a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
index 52dc3b0f5376a..4b0d61db52b43 100644
--- a/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-shl-add-to-add.ll
@@ -142,7 +142,7 @@ define <2 x i1> @shl_add_const_eq_vec_splat(<2 x i64> %v0, <2 x i64> %v3) {
; Test: Vector splat with poison. Should fold once optimization is applied.
define <2 x i1> @shl_add_const_eq_vec_splat_poison(<2 x i64> %v0, <2 x i64> %v3) {
; CHECK-LABEL: @shl_add_const_eq_vec_splat_poison(
-; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V3:%.*]], splat (i64 1)
+; CHECK-NEXT: [[V5:%.*]] = add nsw <2 x i64> [[V3:%.*]], <i64 1, i64 poison>
; CHECK-NEXT: [[V6:%.*]] = icmp eq <2 x i64> [[V1:%.*]], [[V5]]
; CHECK-NEXT: ret <2 x i1> [[V6]]
;
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index ff358c6bc772a..e085be958d317 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -141,9 +141,9 @@ define i8 @lshr_cttz_zero_is_undef_vec(<2 x i8> %x) {
define i8 @lshr_exact(i8 %x) {
; CHECK-LABEL: @lshr_exact(
-; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 1
-; CHECK-NEXT: [[LSHR:%.*]] = and i8 [[TMP1]], 63
-; CHECK-NEXT: ret i8 [[LSHR]]
+; CHECK-NEXT: [[LSHR:%.*]] = and i8 [[TMP1:%.*]], 63
+; CHECK-NEXT: [[ADD1:%.*]] = add nuw nsw i8 [[LSHR]], 1
+; CHECK-NEXT: ret i8 [[ADD1]]
;
%shl = shl i8 %x, 2
%add = add i8 %shl, 4
@@ -153,9 +153,9 @@ define i8 @lshr_exact(i8 %x) {
define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_exact_splat_vec(
-; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], splat (i8 1)
-; CHECK-NEXT: [[LSHR:%.*]] = and <2 x i8> [[TMP1]], splat (i8 63)
-; CHECK-NEXT: ret <2 x i8> [[LSHR]]
+; CHECK-NEXT: [[LSHR:%.*]] = and <2 x i8> [[TMP1:%.*]], splat (i8 63)
+; CHECK-NEXT: [[ADD1:%.*]] = add nuw nsw <2 x i8> [[LSHR]], splat (i8 1)
+; CHECK-NEXT: ret <2 x i8> [[ADD1]]
;
%shl = shl <2 x i8> %x, <i8 2, i8 2>
%add = add <2 x i8> %shl, <i8 4, i8 4>
@@ -165,7 +165,8 @@ define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) {
define <2 x i8> @lshr_exact_splat_vec_nuw(<2 x i8> %x) {
; CHECK-LABEL: @lshr_exact_splat_vec_nuw(
-; CHECK-NEXT: [[LSHR:%.*]] = add nuw <2 x i8> [[X:%.*]], splat (i8 1)
+; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[X:%.*]], splat (i8 63)
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw <2 x i8> [[SHL]], splat (i8 1)
; CHECK-NEXT: ret <2 x i8> [[LSHR]]
;
%shl = shl nuw <2 x i8> %x, <i8 2, i8 2>
- Previous message: [llvm] [InstCombine] Fix #163110: Fold icmp (shl X, L), (add (shl Y, L), 1<<L) to icmp X, (Y + 1) (PR #165975)
- Next message: [llvm] [InstCombine] Fix #163110: Fold icmp (shl X, L), (add (shl Y, L), 1<<L) to icmp X, (Y + 1) (PR #165975)
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
More information about the llvm-commits
mailing list