[llvm] [InstCombine] Fold X * (2^N + 1) >> N -> X + (X >> N), or directly to X if X >> N is 0 (PR #90295)
via llvm-commits
llvm-commits at lists.llvm.org
Mon May 20 19:20:46 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/90295
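For context, the headline fold rewrites a multiply by (2^N + 1) followed by a
right shift by N into an add, since X * (2^N + 1) == (X << N) + X. A minimal
illustration in LLVM IR (hand-written here for exposition, not taken from the
patch):

  ; before: multiply by 2^N + 1 (here N = 1, so the constant is 3),
  ; then shift right by N
  define i32 @example(i32 %x) {
    %mul = mul nuw nsw i32 %x, 3
    %shr = lshr i32 %mul, 1
    ret i32 %shr
  }

  ; after: the multiply is gone; the same value is computed as X + (X >> N)
  define i32 @example(i32 %x) {
    %shr = lshr i32 %x, 1
    %add = add nuw nsw i32 %shr, %x
    ret i32 %add
  }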
From 7d94ac95f9f63ff778035ca44629a9138056b366 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Wed, 1 May 2024 22:50:44 -0400
Subject: [PATCH 1/2] [InstCombine] Pre-commit tests (NFC)
---
llvm/test/Transforms/InstCombine/ashr-lshr.ll | 259 ++++++++++++++++++
llvm/test/Transforms/InstCombine/lshr.ll | 15 +-
2 files changed, 273 insertions(+), 1 deletion(-)
diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index ac206dc7999dd..aa02dff0fde52 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -604,3 +604,262 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
%r = ashr exact <2 x i8> %p, %y
ret <2 x i8> %r
}
+
+define i32 @lshr_mul_times_3_div_2(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nsw nuw i32 %0, 3
+ %lshr = lshr i32 %mul, 1
+ ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_3_div_2_exact(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_exact(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nsw i32 %x, 3
+ %lshr = lshr exact i32 %mul, 1
+ ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @lshr_mul_times_3_div_2_no_flags(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_no_flags(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul i32 %0, 3
+ %lshr = lshr i32 %mul, 1
+ ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse_lshr(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[RES:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT: call void @use(i32 [[MUL]])
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %mul = mul nuw i32 %x, 3
+ %res = lshr i32 %mul, 1
+ call void @use(i32 %mul)
+ ret i32 %res
+}
+
+define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nuw i32 %x, 3
+ %lshr = lshr exact i32 %mul, 1
+ ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_5_div_4(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nsw nuw i32 %0, 5
+ %lshr = lshr i32 %mul, 2
+ ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_5_div_4_exact(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_exact(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nsw i32 %x, 5
+ %lshr = lshr exact i32 %mul, 2
+ ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @lshr_mul_times_5_div_4_no_flags(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_no_flags(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 5
+; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul i32 %0, 5
+ %lshr = lshr i32 %mul, 2
+ ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @mul_times_5_div_4_multiuse_lshr(i32 %x) {
+; CHECK-LABEL: @mul_times_5_div_4_multiuse_lshr(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[RES:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT: call void @use(i32 [[MUL]])
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %mul = mul nuw i32 %x, 5
+ %res = lshr i32 %mul, 2
+ call void @use(i32 %mul)
+ ret i32 %res
+}
+
+define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_exact_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[LSHR]]
+;
+ %mul = mul nuw i32 %x, 5
+ %lshr = lshr exact i32 %mul, 2
+ ret i32 %lshr
+}
+
+define i32 @ashr_mul_times_3_div_2(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nuw nsw i32 %0, 3
+ %ashr = ashr i32 %mul, 1
+ ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nsw i32 %x, 3
+ %ashr = ashr exact i32 %mul, 1
+ ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_3_div_2_no_flags(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_no_flags(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul i32 %0, 3
+ %ashr = ashr i32 %mul, 1
+ ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_3_div_2_no_nsw(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_no_nsw(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nuw i32 %0, 3
+ %ashr = ashr i32 %mul, 1
+ ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @mul_times_3_div_2_multiuse_ashr(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse_ashr(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[RES:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT: call void @use(i32 [[MUL]])
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %mul = mul nsw i32 %x, 3
+ %res = ashr i32 %mul, 1
+ call void @use(i32 %mul)
+ ret i32 %res
+}
+
+define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nsw i32 %x, 3
+ %ashr = ashr exact i32 %mul, 1
+ ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_5_div_4(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4(
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nuw nsw i32 %0, 5
+ %ashr = ashr i32 %mul, 2
+ ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_5_div_4_exact(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_exact(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nsw i32 %x, 5
+ %ashr = ashr exact i32 %mul, 2
+ ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_5_div_4_no_flags(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_no_flags(
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 5
+; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul i32 %0, 5
+ %ashr = ashr i32 %mul, 2
+ ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @mul_times_5_div_4_multiuse_ashr(i32 %x) {
+; CHECK-LABEL: @mul_times_5_div_4_multiuse_ashr(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[RES:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT: call void @use(i32 [[MUL]])
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %mul = mul nsw i32 %x, 5
+ %res = ashr i32 %mul, 2
+ call void @use(i32 %mul)
+ ret i32 %res
+}
+
+define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_exact_2(
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT: ret i32 [[ASHR]]
+;
+ %mul = mul nsw i32 %x, 5
+ %ashr = ashr exact i32 %mul, 2
+ ret i32 %ashr
+}
+
+declare void @use(i32)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index fa92c1c4b3be4..f2f737b7c503f 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -628,7 +628,7 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
ret i32 %t
}
-; Negative test
+; Negative test (but simplifies into a different transform)
define i32 @mul_splat_fold_no_nuw(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_no_nuw(
@@ -641,6 +641,19 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
ret i32 %t
}
+; Negative test
+
+define i32 @mul_splat_fold_no_flags(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_no_flags(
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], 65537
+; CHECK-NEXT: [[T:%.*]] = lshr i32 [[M]], 16
+; CHECK-NEXT: ret i32 [[T]]
+;
+ %m = mul i32 %x, 65537
+ %t = lshr i32 %m, 16
+ ret i32 %t
+}
+
; Negative test (but simplifies before we reach the mul_splat transform) - need more than 2 bits
define i2 @mul_splat_fold_too_narrow(i2 %x) {
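The no-flags functions above are negative tests because the rewrite is only
sound when the multiply cannot wrap. A hand-computed counterexample at i8
width (illustrative, not part of the test files): for X = 86, X * 3 wraps to
2 and 2 >> 1 is 1, while X + (X >> 1) is 86 + 43 = 129, so without nuw/nsw
the two sides disagree:

  ; the fold must not fire here: for %x = 86 the mul wraps
  define i8 @wraps(i8 %x) {
    %mul = mul i8 %x, 3     ; 86 * 3 = 258, which wraps to 2 in i8
    %shr = lshr i8 %mul, 1  ; 2 >> 1 = 1, but 86 + (86 >> 1) = 129
    ret i8 %shr
  }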
From 5051da21a250f688f8fe859f852b6fd4e8c47a3d Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Mon, 6 May 2024 17:07:29 -0400
Subject: [PATCH 2/2] [InstCombine] Fold X * (2^N + 1) >> N -> X + (X >> N), or
 directly to X if X >> N is 0
Alive2 Proofs:
https://alive2.llvm.org/ce/z/eSinJY
https://alive2.llvm.org/ce/z/sweDgc
https://alive2.llvm.org/ce/z/-2dXZi
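The "directly to X" case falls out of the arithmetic: X * (2^N + 1) equals
(X << N) + X, so when X has at most N active bits the product is just two
disjoint copies of X, and shifting right by N recovers X exactly. A
hand-worked sketch in IR (values chosen for illustration):

  ; %masked fits in 8 bits, so %masked * 257 == (%masked << 8) | %masked,
  ; and the shift by 8 recovers %masked; the simplify hook folds it all away
  define i32 @splat_to_x(i32 %x) {
    %masked = and i32 %x, 255        ; countMaxActiveBits == 8
    %mul = mul nuw i32 %masked, 257  ; 257 == 2^8 + 1
    %shr = lshr i32 %mul, 8
    ret i32 %shr                     ; simplifies to ret i32 %masked
  }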
---
llvm/lib/Analysis/InstructionSimplify.cpp | 37 +++++++++-
.../InstCombine/InstCombineShifts.cpp | 67 ++++++++++++-----
llvm/test/Transforms/InstCombine/ashr-lshr.ll | 73 +++++++++++++------
llvm/test/Transforms/InstCombine/lshr.ll | 12 ++-
4 files changed, 137 insertions(+), 52 deletions(-)
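The ashr variant relies on the same identity in signed arithmetic: provided
the multiply has nsw and the shift amount leaves the sign bit intact,
floor(3X / 2) == X + floor(X / 2) holds for any X. Illustrative IR mirroring
the ashr tests updated below (hand-written, not from the patch):

  define i32 @signed_example(i32 %x) {
    %mul = mul nsw i32 %x, 3  ; nsw: 3 * %x does not overflow i32
    %shr = ashr i32 %mul, 1
    ret i32 %shr              ; folds to: %t = ashr i32 %x, 1; add nsw i32 %t, %x
  }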
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 37a7259a5cd02..f5ca3088f4d1a 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1479,6 +1479,25 @@ static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
return X;
+ // Look for a "splat" mul pattern - it replicates bits across each half
+ // of a value, so a right shift is just a mask of the low bits:
+ // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
+ const APInt *MulC;
+ const APInt *ShAmt;
+ if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC))) &&
+ match(Op1, m_APInt(ShAmt))) {
+ unsigned ShAmtC = ShAmt->getZExtValue();
+ unsigned BitWidth = ShAmt->getBitWidth();
+ if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+ MulC->logBase2() == ShAmtC) {
+ if (ShAmtC * 2 == BitWidth)
+ return X;
+ const KnownBits XKnown = computeKnownBits(X, /* Depth */ 0, Q);
+ if (XKnown.countMaxActiveBits() <= ShAmtC)
+ return X;
+ }
+ }
+
// ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
// We can return X as we do in the above case since OR alters no bits in X.
// SimplifyDemandedBits in InstCombine can do more general optimization for
@@ -1518,8 +1537,24 @@ static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
return Constant::getAllOnesValue(Op0->getType());
- // (X << A) >> A -> X
+ const APInt *MulC;
+ const APInt *ShAmt;
Value *X;
+ if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC))) &&
+ match(Op1, m_APInt(ShAmt)) &&
+ cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap()) {
+ unsigned ShAmtC = ShAmt->getZExtValue();
+ unsigned BitWidth = ShAmt->getBitWidth();
+ if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+ MulC->logBase2() == ShAmtC &&
+ (ShAmtC < BitWidth - 1)) /* Minus 1 for the sign bit */ {
+ KnownBits KnownX = computeKnownBits(X, /* Depth */ 0, Q);
+ if (KnownX.countMaxActiveBits() <= ShAmtC)
+ return X;
+ }
+ }
+
+ // (X << A) >> A -> X
if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
return X;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index ba297111d945f..8dd0f2f61756c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1456,30 +1456,42 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
}
const APInt *MulC;
- if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
- // Look for a "splat" mul pattern - it replicates bits across each half of
- // a value, so a right shift is just a mask of the low bits:
- // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
- // TODO: Generalize to allow more than just half-width shifts?
- if (BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
- MulC->logBase2() == ShAmtC)
- return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
+ if (match(Op0, m_OneUse(m_NUWMul(m_Value(X), m_APInt(MulC))))) {
+ if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+ MulC->logBase2() == ShAmtC) {
+
+ // lshr (mul nuw X, (2^N + 1)), N -> add nuw X, (lshr X, N)
+ auto *NewAdd = BinaryOperator::CreateNUWAdd(
+ X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
+ I.isExact()));
+ NewAdd->setHasNoSignedWrap(
+ cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
+ return NewAdd;
+ }
// The one-use check is not strictly necessary, but codegen may not be
// able to invert the transform and perf may suffer with an extra mul
// instruction.
- if (Op0->hasOneUse()) {
- APInt NewMulC = MulC->lshr(ShAmtC);
- // if c is divisible by (1 << ShAmtC):
- // lshr (mul nuw x, MulC), ShAmtC -> mul nuw nsw x, (MulC >> ShAmtC)
- if (MulC->eq(NewMulC.shl(ShAmtC))) {
- auto *NewMul =
- BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
- assert(ShAmtC != 0 &&
- "lshr X, 0 should be handled by simplifyLShrInst.");
- NewMul->setHasNoSignedWrap(true);
- return NewMul;
- }
+ APInt NewMulC = MulC->lshr(ShAmtC);
+ // if c is divisible by (1 << ShAmtC):
+ // lshr (mul nuw x, MulC), ShAmtC -> mul nuw nsw x, (MulC >> ShAmtC)
+ if (MulC->eq(NewMulC.shl(ShAmtC))) {
+ auto *NewMul =
+ BinaryOperator::CreateNUWMul(X, ConstantInt::get(Ty, NewMulC));
+ assert(ShAmtC != 0 &&
+ "lshr X, 0 should be handled by simplifyLShrInst.");
+ NewMul->setHasNoSignedWrap(true);
+ return NewMul;
+ }
+ }
+
+ // lshr (mul nsw X, (2^N + 1)), N -> add nsw X, (lshr X, N)
+ if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC))))) {
+ if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+ MulC->logBase2() == ShAmtC) {
+ return BinaryOperator::CreateNSWAdd(
+ X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
+ I.isExact()));
}
}
@@ -1686,6 +1698,21 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
}
+
+ const APInt *MulC;
+ if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC)))) &&
+ (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+ MulC->logBase2() == ShAmt &&
+ (ShAmt < BitWidth - 1))) /* Minus 1 for the sign bit */ {
+
+ // ashr (mul nsw X, (2^N + 1)), N -> add nsw X, (ashr X, N)
+ auto *NewAdd = BinaryOperator::CreateNSWAdd(
+ X,
+ Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "", I.isExact()));
+ NewAdd->setHasNoUnsignedWrap(
+ cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
+ return NewAdd;
+ }
}
const SimplifyQuery Q = SQ.getWithInstruction(&I);
diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index aa02dff0fde52..dd3ea0f8718c6 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -607,8 +607,8 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
define i32 @lshr_mul_times_3_div_2(i32 %0) {
; CHECK-LABEL: @lshr_mul_times_3_div_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nsw nuw i32 %0, 3
@@ -618,8 +618,8 @@ define i32 @lshr_mul_times_3_div_2(i32 %0) {
define i32 @lshr_mul_times_3_div_2_exact(i32 %x) {
; CHECK-LABEL: @lshr_mul_times_3_div_2_exact(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1
+; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nsw i32 %x, 3
@@ -657,8 +657,8 @@ define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) {
define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
-; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nuw i32 %x, 3
@@ -668,8 +668,8 @@ define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
define i32 @lshr_mul_times_5_div_4(i32 %0) {
; CHECK-LABEL: @lshr_mul_times_5_div_4(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
-; CHECK-NEXT: [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 2
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nsw nuw i32 %0, 5
@@ -679,8 +679,8 @@ define i32 @lshr_mul_times_5_div_4(i32 %0) {
define i32 @lshr_mul_times_5_div_4_exact(i32 %x) {
; CHECK-LABEL: @lshr_mul_times_5_div_4_exact(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2
+; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nsw i32 %x, 5
@@ -718,8 +718,8 @@ define i32 @mul_times_5_div_4_multiuse_lshr(i32 %x) {
define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
; CHECK-LABEL: @lshr_mul_times_5_div_4_exact_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
-; CHECK-NEXT: [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2
+; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[LSHR]]
;
%mul = mul nuw i32 %x, 5
@@ -729,8 +729,8 @@ define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
define i32 @ashr_mul_times_3_div_2(i32 %0) {
; CHECK-LABEL: @ashr_mul_times_3_div_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nuw nsw i32 %0, 3
@@ -740,8 +740,8 @@ define i32 @ashr_mul_times_3_div_2(i32 %0) {
define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1
+; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nsw i32 %x, 3
@@ -792,8 +792,8 @@ define i32 @mul_times_3_div_2_multiuse_ashr(i32 %x) {
define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1
+; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nsw i32 %x, 3
@@ -803,8 +803,8 @@ define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
define i32 @ashr_mul_times_5_div_4(i32 %0) {
; CHECK-LABEL: @ashr_mul_times_5_div_4(
-; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
-; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 2
+; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nuw nsw i32 %0, 5
@@ -814,8 +814,8 @@ define i32 @ashr_mul_times_5_div_4(i32 %0) {
define i32 @ashr_mul_times_5_div_4_exact(i32 %x) {
; CHECK-LABEL: @ashr_mul_times_5_div_4_exact(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2
+; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nsw i32 %x, 5
@@ -853,8 +853,8 @@ define i32 @mul_times_5_div_4_multiuse_ashr(i32 %x) {
define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
; CHECK-LABEL: @ashr_mul_times_5_div_4_exact_2(
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT: [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2
+; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[ASHR]]
;
%mul = mul nsw i32 %x, 5
@@ -862,4 +862,29 @@ define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
ret i32 %ashr
}
+define i32 @mul_splat_fold_known_active_bits(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_known_active_bits(
+; CHECK-NEXT: [[M:%.*]] = mul nuw i32 [[X:%.*]], 65537
+; CHECK-NEXT: [[T:%.*]] = ashr i32 [[M]], 16
+; CHECK-NEXT: ret i32 [[T]]
+;
+ %xx = and i32 %x, 360
+ %m = mul nuw i32 %x, 65537
+ %t = ashr i32 %m, 16
+ ret i32 %t
+}
+
+; Negative test
+
+define i32 @mul_splat_fold_no_known_active_bits(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_no_known_active_bits(
+; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 [[X:%.*]], 16
+; CHECK-NEXT: [[T:%.*]] = add nsw i32 [[TMP1]], [[X]]
+; CHECK-NEXT: ret i32 [[T]]
+;
+ %m = mul nsw i32 %x, 65537
+ %t = ashr i32 %m, 16
+ ret i32 %t
+}
+
declare void @use(i32)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index f2f737b7c503f..ee79550794a13 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -348,8 +348,7 @@ define <2 x i32> @narrow_lshr_constant(<2 x i8> %x, <2 x i8> %y) {
define i32 @mul_splat_fold(i32 %x) {
; CHECK-LABEL: @mul_splat_fold(
-; CHECK-NEXT: [[T:%.*]] = and i32 [[X:%.*]], 65535
-; CHECK-NEXT: ret i32 [[T]]
+; CHECK-NEXT: ret i32 [[X:%.*]]
;
%m = mul nuw i32 %x, 65537
%t = lshr i32 %m, 16
@@ -362,8 +361,7 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
; CHECK-LABEL: @mul_splat_fold_vec(
; CHECK-NEXT: [[M:%.*]] = mul nuw <3 x i14> [[X:%.*]], <i14 129, i14 129, i14 129>
; CHECK-NEXT: call void @usevec(<3 x i14> [[M]])
-; CHECK-NEXT: [[T:%.*]] = and <3 x i14> [[X]], <i14 127, i14 127, i14 127>
-; CHECK-NEXT: ret <3 x i14> [[T]]
+; CHECK-NEXT: ret <3 x i14> [[X]]
;
%m = mul nuw <3 x i14> %x, <i14 129, i14 129, i14 129>
call void @usevec(<3 x i14> %m)
@@ -632,8 +630,8 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
define i32 @mul_splat_fold_no_nuw(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_no_nuw(
-; CHECK-NEXT: [[M:%.*]] = mul nsw i32 [[X:%.*]], 65537
-; CHECK-NEXT: [[T:%.*]] = lshr i32 [[M]], 16
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 16
+; CHECK-NEXT: [[T:%.*]] = add nsw i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[T]]
;
%m = mul nsw i32 %x, 65537
@@ -641,7 +639,7 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
ret i32 %t
}
-; Negative test
+; Negative test
define i32 @mul_splat_fold_no_flags(i32 %x) {
; CHECK-LABEL: @mul_splat_fold_no_flags(
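To reproduce the new output locally, the updated functions can be piped
through "opt -passes=instcombine -S" and compared against the CHECK lines
(or regenerated with llvm/utils/update_test_checks.py). As a quick sanity
check of the headline simplify, this is the same shape as mul_splat_fold
above:

  ; mul nuw by 65537 (2^16 + 1) then lshr by 16 now simplifies to %x itself,
  ; because nuw already guarantees %x fits in the low 16 bits
  define i32 @sanity(i32 %x) {
    %m = mul nuw i32 %x, 65537
    %t = lshr i32 %m, 16
    ret i32 %t  ; folds to: ret i32 %x
  }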