[llvm] [InstCombine] Fold adds + shifts with nsw and nuw flags (PR #88193)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Apr 21 06:05:03 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/88193
From e287e93b764459251d23f35d5cdbbde536f2a161 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Tue, 9 Apr 2024 16:32:36 -0400
Subject: [PATCH 1/3] [InstCombine] Pre-commit tests (NFC)
---
llvm/test/Transforms/InstCombine/lshr.ll | 57 ++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 7d611ba188d6b4..adc40aaf82b20e 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -360,8 +360,65 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
ret <3 x i14> %t
}
+define i32 @mul_times_3_div_2 (i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2(
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 1
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
+ %2 = mul nsw nuw i32 %x, 3
+ %3 = lshr i32 %2, 1
+ ret i32 %3
+}
+
+define i32 @shl_add_lshr (i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_add_lshr(
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = lshr exact i32 [[TMP2]], [[C]]
+; CHECK-NEXT: ret i32 [[TMP3]]
+;
+ %2 = shl nuw i32 %x, %c
+ %3 = add nsw nuw i32 %2, %y
+ %4 = lshr exact i32 %3, %c
+ ret i32 %4
+}
+
+define i32 @ashr_mul_times_3_div_2 (i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2(
+; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = ashr i32 [[TMP2]], 1
+; CHECK-NEXT: ret i32 [[TMP3]]
+;
+ %2 = mul nsw nuw i32 %0, 3
+ %3 = ashr i32 %2, 1
+ ret i32 %3
+}
+
+define i32 @ashr_mul_times_3_div_2_exact (i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
+; CHECK-NEXT: [[TMP2:%.*]] = mul nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = ashr exact i32 [[TMP2]], 1
+; CHECK-NEXT: ret i32 [[TMP3]]
+;
+ %2 = mul nsw i32 %0, 3
+ %3 = ashr exact i32 %2, 1
+ ret i32 %3
+}
+
; Negative test
+define i32 @mul_times_3_div_2_no_nsw (i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_no_nsw(
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[X:%.*]], 3
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 1
+; CHECK-NEXT: ret i32 [[TMP2]]
+;
+ %2 = mul i32 %x, 3
+ %3 = lshr i32 %2, 1
+ ret i32 %3
+}
+
define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_wrong_mul_const(
; CHECK-NEXT: [[M:%.*]] = mul nuw i32 [[X:%.*]], 65538
From 0c436922e730239c0c1c92e2bcb35e65aee7eb82 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Tue, 9 Apr 2024 14:56:50 -0400
Subject: [PATCH 2/3] [InstCombine] Fold adds + shifts with nsw and nuw flags
I also added a fold for mul nsw/nuw by 3 followed by a shift right by 1, since that is the canonical form of ((x << 1) + x) / 2; canonicalization would otherwise keep InstCombine from matching that expression.
Proofs:
https://alive2.llvm.org/ce/z/kDVTiL
https://alive2.llvm.org/ce/z/wORNYm
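For illustration, the first fold rewrites IR shaped like this (a sketch of
the intended transform; the value names are placeholders):

  %shl = shl nuw i32 %x, %c
  %add = add nuw i32 %shl, %y
  %res = lshr i32 %add, %c
  ; --> with nuw on both the shl and the add:
  %shr = lshr i32 %y, %c
  %res = add nuw i32 %shr, %x

The mul-by-3 special case similarly turns (mul nuw nsw i32 %x, 3) followed
by a lshr by 1 into %x + (%x >>u 1), carrying the wrap flags over.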
---
.../InstCombine/InstCombineShifts.cpp | 49 ++++++++++++++++++-
llvm/test/Transforms/InstCombine/lshr.ll | 25 +++++-----
2 files changed, 59 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 244f03a1bc2b4c..b77e002376d4f3 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1267,6 +1267,18 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
+ // If both the add and the shl are nuw:
+ // ((X << C) + Y) nuw >>u C --> (X + (Y >>u C)) nuw
+ Value *Y;
+ if (match(Op0, m_OneUse(m_c_NUWAdd((m_NUWShl(m_Value(X), m_Specific(Op1))),
+ m_Value(Y))))) {
+ Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+ auto *NewAdd = BinaryOperator::CreateNUWAdd(NewLshr, X);
+ auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0);
+ NewAdd->setHasNoSignedWrap(Op0Bin->hasNoSignedWrap());
+ return NewAdd;
+ }
+
if (match(Op1, m_APInt(C))) {
unsigned ShAmtC = C->getZExtValue();
auto *II = dyn_cast<IntrinsicInst>(Op0);
@@ -1283,7 +1295,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
return new ZExtInst(Cmp, Ty);
}
- Value *X;
const APInt *C1;
if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
if (C1->ult(ShAmtC)) {
@@ -1328,7 +1339,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
// ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
// TODO: Consolidate with the more general transform that starts from shl
// (the shifts are in the opposite order).
- Value *Y;
if (match(Op0,
m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
m_Value(Y))))) {
@@ -1450,9 +1460,24 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
NewMul->setHasNoSignedWrap(true);
return NewMul;
}
+
+ // Special case: lshr (mul nuw X, 3), 1 -> add nuw nsw X, (lshr X, 1)
+ if (ShAmtC == 1 && *MulC == 3) {
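+ // With nuw on the mul, (X * 3) >>u 1 == ((X << 1) + X) >>u 1
+ // == X + (X >>u 1), and the resulting add can wrap in neither sense.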
+ auto *NewAdd = BinaryOperator::CreateNUWAdd(
+ X,
+ Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+ NewAdd->setHasNoSignedWrap(true);
+ return NewAdd;
+ }
}
}
+ // lshr (mul nsw X, 3), 1 -> add nsw X, (lshr X, 1)
+ if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+ ShAmtC == 1)
+ return BinaryOperator::CreateNSWAdd(
+ X, Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+
// Try to narrow bswap.
// In the case where the shift amount equals the bitwidth difference, the
// shift is eliminated.
@@ -1656,6 +1681,26 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
}
+
+ // Special case: ashr (mul nsw X, 3), 1 -> add nsw X, (ashr X, 1)
+ if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+ ShAmt == 1) {
+ auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0);
+ Value *Shift;
+ if (Op0Bin->hasNoUnsignedWrap())
+ // With both nsw and nuw the sign bit of the mul result is known
+ // zero, so lshr and ashr agree; prefer the canonical lshr.
+ Shift = Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+ else
+ Shift = Builder.CreateAShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+
+ auto *NewAdd = BinaryOperator::CreateNSWAdd(X, Shift);
+ NewAdd->setHasNoUnsignedWrap(Op0Bin->hasNoUnsignedWrap());
+ return NewAdd;
+ }
}
const SimplifyQuery Q = SQ.getWithInstruction(&I);
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index adc40aaf82b20e..39830842c47091 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -362,9 +362,9 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
define i32 @mul_times_3_div_2 (i32 %x) {
; CHECK-LABEL: @mul_times_3_div_2(
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i32 [[X:%.*]], 3
-; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 1
-; CHECK-NEXT: ret i32 [[TMP2]]
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1:%.*]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
%2 = mul nsw nuw i32 %x, 3
%3 = lshr i32 %2, 1
@@ -373,21 +373,20 @@ define i32 @mul_times_3_div_2 (i32 %x) {
define i32 @shl_add_lshr (i32 %x, i32 %c, i32 %y) {
; CHECK-LABEL: @shl_add_lshr(
-; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = lshr exact i32 [[TMP2]], [[C]]
-; CHECK-NEXT: ret i32 [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = lshr exact i32 [[TMP2:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
%2 = shl nuw i32 %x, %c
- %3 = add nsw nuw i32 %2, %y
+ %3 = add nuw nsw i32 %2, %y
%4 = lshr exact i32 %3, %c
ret i32 %4
}
define i32 @ashr_mul_times_3_div_2 (i32 %0) {
; CHECK-LABEL: @ashr_mul_times_3_div_2(
-; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = ashr i32 [[TMP2]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
; CHECK-NEXT: ret i32 [[TMP3]]
;
%2 = mul nsw nuw i32 %0, 3
@@ -397,9 +396,9 @@ define i32 @mul_times_3_div_2 (i32 %x) {
define i32 @ashr_mul_times_3_div_2_exact (i32 %0) {
; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
-; CHECK-NEXT: [[TMP2:%.*]] = mul nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT: [[TMP3:%.*]] = ashr exact i32 [[TMP2]], 1
-; CHECK-NEXT: ret i32 [[TMP3]]
+; CHECK-NEXT: [[TMP3:%.*]] = ashr exact i32 [[TMP2:%.*]], 1
+; CHECK-NEXT: [[TMP4:%.*]] = add nsw i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT: ret i32 [[TMP4]]
;
%2 = mul nsw i32 %0, 3
%3 = ashr exact i32 %2, 1
From 59ca3c28d73a26f246e75b8ba019e492e4fae10f Mon Sep 17 00:00:00 2001
From: AtariDreams <gfunni234 at gmail.com>
Date: Sun, 21 Apr 2024 09:01:31 -0400
Subject: [PATCH 3/3] Fix mistake
---
llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index b77e002376d4f3..2e64f02edda376 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1270,8 +1270,8 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
// If both the add and the shl are nuw:
// ((X << C) + Y) nuw >>u C --> (X + (Y >>u C)) nuw
Value *Y;
- if (match(Op0, m_OneUse(m_c_NUWAdd((m_NUWShl(m_Value(X), m_Specific(Op1))),
- m_Value(Y))))) {
+ if (match(Op0, m_OneUse(m_c_NUWAdd(m_NUWShl(m_Value(X), m_Specific(Op1)),
+ m_Value(Y))))) {
Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
auto *NewAdd = BinaryOperator::CreateNUWAdd(NewLshr, X);
auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0);