[llvm] [InstCombine] Fold ((X << nuw Z) binop nuw Y) >>u Z --> X binop nuw (Y >>u Z) (PR #88193)
via llvm-commits
llvm-commits at lists.llvm.org
Mon May 6 19:28:27 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/88193
From d2573bf1d1329d75d46a7cbeb9ae3f24a06f9462 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 5 May 2024 21:43:20 -0400
Subject: [PATCH 1/2] [InstCombine] Pre-commit tests (NFC)
---
llvm/test/Transforms/InstCombine/lshr.ll | 146 +++++++++++++++++++++++
1 file changed, 146 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 7d611ba188d6b4..d320f4dab77801 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -163,6 +163,18 @@ define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) {
ret <2 x i8> %lshr
}
+define <2 x i8> @lshr_exact_splat_vec_nuw(<2 x i8> %x) {
+; CHECK-LABEL: @lshr_exact_splat_vec_nuw(
+; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 1, i8 1>
+; CHECK-NEXT: [[LSHR:%.*]] = and <2 x i8> [[TMP1]], <i8 63, i8 63>
+; CHECK-NEXT: ret <2 x i8> [[LSHR]]
+;
+ %shl = shl nuw <2 x i8> %x, <i8 2, i8 2>
+ %add = add nuw <2 x i8> %shl, <i8 4, i8 4>
+ %lshr = lshr <2 x i8> %add, <i8 2, i8 2>
+ ret <2 x i8> %lshr
+}
+
define i8 @shl_add(i8 %x, i8 %y) {
; CHECK-LABEL: @shl_add(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[Y:%.*]], 2
@@ -360,8 +372,127 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
ret <3 x i14> %t
}
+define i32 @shl_add_lshr_flag_preservation(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_add_lshr_flag_preservation(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[ADD]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %add = add nuw nsw i32 %shl, %y
+ %lshr = lshr exact i32 %add, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_add_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_add_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ADD]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %add = add nuw i32 %shl, %y
+ %lshr = lshr i32 %add, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_add_lshr_comm(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_add_lshr_comm(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ADD]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %add = add nuw i32 %y, %shl
+ %lshr = lshr i32 %add, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_sub_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_sub_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = sub nuw i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[SUB]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %sub = sub nuw i32 %shl, %y
+ %lshr = lshr i32 %sub, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_or_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_or_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %or = or i32 %shl, %y
+ %lshr = lshr i32 %or, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_or_disjoint_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_or_disjoint_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %or = or disjoint i32 %shl, %y
+ %lshr = lshr i32 %or, %c
+ ret i32 %lshr
+}
+
+define i32 @shl_xor_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_xor_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[XOR]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %xor = xor i32 %shl, %y
+ %lshr = lshr i32 %xor, %c
+ ret i32 %lshr
+}
+
; Negative test
+define i32 @shl_and_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_and_lshr(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[AND]], [[C]]
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %shl = shl nuw i32 %x, %c
+ %and = and i32 %shl, %y
+ %lshr = lshr i32 %and, %c
+ ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @shl_add_lshr_neg(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @shl_add_lshr_neg(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT: [[RES:%.*]] = lshr exact i32 [[ADD]], [[Z]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %shl = shl nuw i32 %x, %y
+ %add = add nuw nsw i32 %shl, %z
+ %res = lshr exact i32 %add, %z
+ ret i32 %res
+}
+
define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_wrong_mul_const(
; CHECK-NEXT: [[M:%.*]] = mul nuw i32 [[X:%.*]], 65538
@@ -375,6 +506,21 @@ define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
; Negative test
+define i32 @shl_add_lshr_multiuse(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @shl_add_lshr_multiuse(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT: call void @use(i32 [[ADD]])
+; CHECK-NEXT: [[RES:%.*]] = lshr exact i32 [[ADD]], [[Z]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %shl = shl nuw i32 %x, %y
+ %add = add nuw nsw i32 %shl, %z
+ call void @use (i32 %add)
+ %res = lshr exact i32 %add, %z
+ ret i32 %res
+}
+
define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_wrong_lshr_const(
; CHECK-NEXT: [[M:%.*]] = mul nuw i32 [[X:%.*]], 65537
From 42a920ae39f2694821953fccec794347bb7fb295 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Mon, 6 May 2024 20:36:28 -0400
Subject: [PATCH 2/2] [InstCombine] Fold ((X << nuw Z) binop nuw Y) >>u Z --> X
binop nuw (Y >>u Z)
Proofs:
https://alive2.llvm.org/ce/z/N9dRzP
https://alive2.llvm.org/ce/z/Xrpc-Y
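The fold is justified by two facts: shl nuw guarantees that no bits of X
are shifted out, and the nuw on the add guarantees that the sum does not
wrap, so shifting right by Z recovers X plus the shifted Y; the bitwise
or/xor cases need only the nuw on the shift. For anyone skimming the
thread, below is a quick standalone brute-force check of the add/or/xor
cases over i8 in C++ (illustrative only, not part of the patch, and
independent of the Alive2 proofs above; the sub and exact-flag cases are
not covered):

// Exhaustively verify, for 8-bit values, that whenever the preconditions
// of the fold hold, ((X << Z) binop Y) >>u Z equals X binop (Y >>u Z).
#include <cassert>
#include <cstdio>

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      for (unsigned Z = 0; Z < 8; ++Z) {
        unsigned Shl = X << Z;
        if (Shl > 0xFF)
          continue; // shl is not nuw: bits of X were shifted out.
        // or/xor need only the nuw shl: X's high bits are known zero.
        assert(((Shl | Y) >> Z) == (X | (Y >> Z)));
        assert(((Shl ^ Y) >> Z) == (X ^ (Y >> Z)));
        // add additionally needs nuw on the add itself.
        if (Shl + Y <= 0xFF)
          assert(((Shl + Y) >> Z) == X + (Y >> Z));
      }
  std::puts("all i8 cases verified");
  return 0;
}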
---
.../InstCombine/InstCombineShifts.cpp | 48 ++++++++++++++++++-
llvm/test/Transforms/InstCombine/lshr.ll | 38 ++++++---------
2 files changed, 61 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 1cb21a1d81af4b..f5b1552600815d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1259,6 +1259,52 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
+ Value *Y;
+ if (match(Op0, m_OneUse(m_NUWSub(m_NUWShl(m_Value(X), m_Specific(Op1)),
+ m_Value(Y))))) {
+ Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+ auto *NewSub = BinaryOperator::CreateNUWSub(NewLshr, X);
+ NewSub->setHasNoSignedWrap(
+ cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
+ return NewSub;
+ }
+
+ auto isSuitableBinOpcode = [](Instruction::BinaryOps BinOpcode) {
+ switch (BinOpcode) {
+ default:
+ return false;
+ case Instruction::Add:
+ case Instruction::Or:
+ case Instruction::Xor:
+ // And does not work here, and sub is handled separately.
+ return true;
+ }
+ };
+
+ // If both the add and the shift are nuw, then:
+ // ((X << nuw Z) binop nuw Y) >>u Z --> X binop nuw (Y >>u Z)
+ if (match(Op0, m_OneUse(m_c_BinOp(m_NUWShl(m_Value(X), m_Specific(Op1)),
+ m_Value(Y))))) {
+ BinaryOperator *Op0OB = cast<BinaryOperator>(Op0);
+ bool canFold = false;
+ if (isSuitableBinOpcode(Op0OB->getOpcode())) {
+ if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(Op0)) {
+ if (Inst->hasNoUnsignedWrap())
+ canFold = true;
+ } else
+ canFold = true;
+
+ if (canFold) {
+ Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+ auto *NewBinOp = BinaryOperator::Create(Op0OB->getOpcode(), NewLshr, X);
+ NewBinOp->setHasNoUnsignedWrap(Op0OB->hasNoUnsignedWrap());
+ NewBinOp->setHasNoSignedWrap(Op0OB->hasNoSignedWrap());
+
+ return NewBinOp;
+ }
+ }
+ }
+
if (match(Op1, m_APInt(C))) {
unsigned ShAmtC = C->getZExtValue();
auto *II = dyn_cast<IntrinsicInst>(Op0);
@@ -1275,7 +1321,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
return new ZExtInst(Cmp, Ty);
}
- Value *X;
const APInt *C1;
if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
if (C1->ult(ShAmtC)) {
@@ -1320,7 +1365,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
// ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
// TODO: Consolidate with the more general transform that starts from shl
// (the shifts are in the opposite order).
- Value *Y;
if (match(Op0,
m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
m_Value(Y))))) {
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index d320f4dab77801..2455fb6508c8ab 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -165,8 +165,7 @@ define <2 x i8> @lshr_exact_splat_vec(<2 x i8> %x) {
define <2 x i8> @lshr_exact_splat_vec_nuw(<2 x i8> %x) {
; CHECK-LABEL: @lshr_exact_splat_vec_nuw(
-; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 1, i8 1>
-; CHECK-NEXT: [[LSHR:%.*]] = and <2 x i8> [[TMP1]], <i8 63, i8 63>
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw <2 x i8> [[X:%.*]], <i8 1, i8 1>
; CHECK-NEXT: ret <2 x i8> [[LSHR]]
;
%shl = shl nuw <2 x i8> %x, <i8 2, i8 2>
@@ -374,9 +373,8 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
define i32 @shl_add_lshr_flag_preservation(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_add_lshr_flag_preservation(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[ADD]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -387,9 +385,8 @@ define i32 @shl_add_lshr_flag_preservation(i32 %x, i32 %c, i32 %y) {
define i32 @shl_add_lshr(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_add_lshr(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ADD]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -400,9 +397,8 @@ define i32 @shl_add_lshr(i32 %x, i32 %c, i32 %y) {
define i32 @shl_add_lshr_comm(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_add_lshr_comm(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[ADD]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -413,9 +409,8 @@ define i32 @shl_add_lshr_comm(i32 %x, i32 %c, i32 %y) {
define i32 @shl_sub_lshr(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_sub_lshr(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[SUB:%.*]] = sub nuw i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[SUB]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = sub nuw i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -426,9 +421,8 @@ define i32 @shl_sub_lshr(i32 %x, i32 %c, i32 %y) {
define i32 @shl_or_lshr(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_or_lshr(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = or i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -439,9 +433,8 @@ define i32 @shl_or_lshr(i32 %x, i32 %c, i32 %y) {
define i32 @shl_or_disjoint_lshr(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_or_disjoint_lshr(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or disjoint i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[OR]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = or disjoint i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c
@@ -452,9 +445,8 @@ define i32 @shl_or_disjoint_lshr(i32 %x, i32 %c, i32 %y) {
define i32 @shl_xor_lshr(i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_xor_lshr(
-; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[XOR]], [[C]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[LSHR:%.*]] = xor i32 [[TMP1]], [[X:%.*]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%shl = shl nuw i32 %x, %c