[llvm] [InstCombine] Simplify further when shift is half bitwidth (PR #93677)

via llvm-commits llvm-commits at lists.llvm.org
Wed May 29 14:15:22 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/93677

From 8e26587135765d6aed4b9e1aaa7bda109c4d4d3d Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Wed, 1 May 2024 22:50:44 -0400
Subject: [PATCH 1/4] [InstCombine] Pre-commit tests (NFC)

---
 llvm/test/Transforms/InstCombine/ashr-lshr.ll | 259 ++++++++++++++++++
 llvm/test/Transforms/InstCombine/lshr.ll      |  15 +-
 2 files changed, 273 insertions(+), 1 deletion(-)

diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index ac206dc7999dd..aa02dff0fde52 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -604,3 +604,262 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
   %r = ashr exact <2 x i8> %p, %y
   ret <2 x i8> %r
 }
+
+define i32 @lshr_mul_times_3_div_2(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nsw nuw i32 %0, 3
+  %lshr = lshr i32 %mul, 1
+  ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_3_div_2_exact(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_exact(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nsw i32 %x, 3
+  %lshr = lshr exact i32 %mul, 1
+  ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @lshr_mul_times_3_div_2_no_flags(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_no_flags(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul i32 %0, 3
+  %lshr = lshr i32 %mul, 1
+  ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse_lshr(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[RES:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nuw i32 %x, 3
+  %res = lshr i32 %mul, 1
+  call void @use(i32 %mul)
+  ret i32 %res
+}
+
+define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nuw i32 %x, 3
+  %lshr = lshr exact i32 %mul, 1
+  ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_5_div_4(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nsw nuw i32 %0, 5
+  %lshr = lshr i32 %mul, 2
+  ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_5_div_4_exact(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_exact(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nsw i32 %x, 5
+  %lshr = lshr exact i32 %mul, 2
+  ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @lshr_mul_times_5_div_4_no_flags(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_no_flags(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 5
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul i32 %0, 5
+  %lshr = lshr i32 %mul, 2
+  ret i32 %lshr
+}
+
+; Negative test
+
+define i32 @mul_times_5_div_4_multiuse_lshr(i32 %x) {
+; CHECK-LABEL: @mul_times_5_div_4_multiuse_lshr(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[RES:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nuw i32 %x, 5
+  %res = lshr i32 %mul, 2
+  call void @use(i32 %mul)
+  ret i32 %res
+}
+
+define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
+; CHECK-LABEL: @lshr_mul_times_5_div_4_exact_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nuw i32 %x, 5
+  %lshr = lshr exact i32 %mul, 2
+  ret i32 %lshr
+}
+
+define i32 @ashr_mul_times_3_div_2(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nuw nsw i32 %0, 3
+  %ashr = ashr i32 %mul, 1
+  ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw i32 %x, 3
+  %ashr = ashr exact i32 %mul, 1
+  ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_3_div_2_no_flags(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_no_flags(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul i32 %0, 3
+  %ashr = ashr i32 %mul, 1
+  ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_3_div_2_no_nsw(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_no_nsw(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nuw i32 %0, 3
+  %ashr = ashr i32 %mul, 1
+  ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @mul_times_3_div_2_multiuse_ashr(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse_ashr(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[RES:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nsw i32 %x, 3
+  %res = ashr i32 %mul, 1
+  call void @use(i32 %mul)
+  ret i32 %res
+}
+
+define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw i32 %x, 3
+  %ashr = ashr exact i32 %mul, 1
+  ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_5_div_4(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nuw nsw i32 %0, 5
+  %ashr = ashr i32 %mul, 2
+  ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_5_div_4_exact(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_exact(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw i32 %x, 5
+  %ashr = ashr exact i32 %mul, 2
+  ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @ashr_mul_times_5_div_4_no_flags(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_no_flags(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[TMP0:%.*]], 5
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul i32 %0, 5
+  %ashr = ashr i32 %mul, 2
+  ret i32 %ashr
+}
+
+; Negative test
+
+define i32 @mul_times_5_div_4_multiuse_ashr(i32 %x) {
+; CHECK-LABEL: @mul_times_5_div_4_multiuse_ashr(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[RES:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nsw i32 %x, 5
+  %res = ashr i32 %mul, 2
+  call void @use(i32 %mul)
+  ret i32 %res
+}
+
+define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_5_div_4_exact_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw i32 %x, 5
+  %ashr = ashr exact i32 %mul, 2
+  ret i32 %ashr
+}
+
+declare void @use(i32)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index fa92c1c4b3be4..f2f737b7c503f 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -628,7 +628,7 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
   ret i32 %t
 }
 
-; Negative test
+; Negative test (but simplifies into a different transform)
 
 define i32 @mul_splat_fold_no_nuw(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_nuw(
@@ -641,6 +641,19 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
   ret i32 %t
 }
 
+; Negative test 
+
+define i32 @mul_splat_fold_no_flags(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_no_flags(
+; CHECK-NEXT:    [[M:%.*]] = mul i32 [[X:%.*]], 65537
+; CHECK-NEXT:    [[T:%.*]] = lshr i32 [[M]], 16
+; CHECK-NEXT:    ret i32 [[T]]
+;
+  %m = mul i32 %x, 65537
+  %t = lshr i32 %m, 16
+  ret i32 %t
+}
+
 ; Negative test (but simplifies before we reach the mul_splat transform)- need more than 2 bits
 
 define i2 @mul_splat_fold_too_narrow(i2 %x) {

From 1d89cd6b056605f0f5d38f6e33daf9f8a557cc14 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Mon, 6 May 2024 17:07:29 -0400
Subject: [PATCH 2/4] [InstCombine] lshr (mul (X, 2^N + 1)), N -> add (X,
 lshr(X, N))

Alive2 Proofs:
https://alive2.llvm.org/ce/z/eSinJY
https://alive2.llvm.org/ce/z/sweDgc
https://alive2.llvm.org/ce/z/-2dXZi
https://alive2.llvm.org/ce/z/kx2PhF
https://alive2.llvm.org/ce/z/e9QjM6
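
The identity behind the fold: with no unsigned wrap, X * (2^N + 1) is
exactly (X << N) + X, so shifting the product right by N leaves
X + (X >> N). A minimal sketch with N = 1 (illustrative only, not one of
the committed tests):

  %mul = mul nuw i32 %x, 3   ; X * (2^1 + 1), cannot wrap
  %res = lshr i32 %mul, 1    ; computes floor(3 * X / 2)

becomes

  %tmp = lshr i32 %x, 1
  %res = add nuw i32 %tmp, %x

The existing code only handled the half-bitwidth "splat" case (e.g. a mul
by 65537 on i32, which copies the low 16 bits into the high half); the
add form covers any shift amount equal to log2(MulC - 1).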
---
 .../InstCombine/InstCombineShifts.cpp         | 63 ++++++++++++++--
 llvm/test/Transforms/InstCombine/ashr-lshr.ll | 70 ++++++++++++------
 llvm/test/Transforms/InstCombine/lshr.ll      | 25 ++++---
 3 files changed, 118 insertions(+), 40 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 0f1979fbe0c76..3d2563317e336 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1461,13 +1461,30 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
 
     const APInt *MulC;
     if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
-      // Look for a "splat" mul pattern - it replicates bits across each half of
-      // a value, so a right shift is just a mask of the low bits:
-      // lshr i[2N] (mul nuw X, (2^N)+1), N --> and iN X, (2^N)-1
-      // TODO: Generalize to allow more than just half-width shifts?
-      if (BitWidth > 2 && ShAmtC * 2 == BitWidth && (*MulC - 1).isPowerOf2() &&
-          MulC->logBase2() == ShAmtC)
-        return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
+      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+          MulC->logBase2() == ShAmtC) {
+
+        // Look for a "splat" mul pattern - it replicates bits across each half
+        // of a value, so a right shift is just a mask of the low bits:
+        // lshr i[2N] (mul nuw X, (2^N)+1), N --> X
+        if (ShAmtC * 2 == BitWidth)
+          return replaceInstUsesWith(I, X);
+
+        KnownBits KnownX =
+            computeKnownBits(X, /* Depth */ 0, cast<Instruction>(Op0));
+        if (KnownX.countMaxActiveBits() <= ShAmtC)
+          return replaceInstUsesWith(I, X);
+
+        // lshr (mul nuw (X, 2^N + 1)), N -> add nuw (X, lshr(X, N))
+        if (Op0->hasOneUse()) {
+          auto *NewAdd = BinaryOperator::CreateNUWAdd(
+              X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
+                                    I.isExact()));
+          NewAdd->setHasNoSignedWrap(
+              cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
+          return NewAdd;
+        }
+      }
 
       // The one-use check is not strictly necessary, but codegen may not be
       // able to invert the transform and perf may suffer with an extra mul
@@ -1487,6 +1504,16 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       }
     }
 
+    // lshr (mul nsw (X, 2^N + 1)), N -> add nsw (X, lshr(X, N))
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC))))) {
+      if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+          MulC->logBase2() == ShAmtC) {
+        return BinaryOperator::CreateNSWAdd(
+            X, Builder.CreateLShr(X, ConstantInt::get(Ty, ShAmtC), "",
+                                  I.isExact()));
+      }
+    }
+
     // Try to narrow bswap.
     // In the case where the shift amount equals the bitwidth difference, the
     // shift is eliminated.
@@ -1690,6 +1717,28 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
     }
+
+    const APInt *MulC;
+    if (match(Op0, m_NSWMul(m_Value(X), m_APInt(MulC))) &&
+        (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
+         MulC->logBase2() == ShAmt &&
+         (ShAmt < BitWidth - 1))) /* Minus 1 for the sign bit */ {
+
+      // ashr (mul nsw (X, 2^N + 1)), N -> add nsw (X, ashr(X, N))
+      KnownBits KnownX =
+          computeKnownBits(X, /* Depth */ 0, cast<Instruction>(Op0));
+      if (KnownX.countMaxActiveBits() <= ShAmt)
+        return replaceInstUsesWith(I, X);
+
+      if (Op0->hasOneUse()) {
+        auto *NewAdd = BinaryOperator::CreateNSWAdd(
+            X, Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "",
+                                  I.isExact()));
+        NewAdd->setHasNoUnsignedWrap(
+            cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
+        return NewAdd;
+      }
+    }
   }
 
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index aa02dff0fde52..f426755dfc9dd 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -607,8 +607,8 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
 
 define i32 @lshr_mul_times_3_div_2(i32 %0) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw nuw i32 %0, 3
@@ -618,8 +618,8 @@ define i32 @lshr_mul_times_3_div_2(i32 %0) {
 
 define i32 @lshr_mul_times_3_div_2_exact(i32 %x) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2_exact(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw i32 %x, 3
@@ -657,8 +657,8 @@ define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) {
 
 define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nuw i32 %x, 3
@@ -668,8 +668,8 @@ define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) {
 
 define i32 @lshr_mul_times_5_div_4(i32 %0) {
 ; CHECK-LABEL: @lshr_mul_times_5_div_4(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 2
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw nuw i32 %0, 5
@@ -679,8 +679,8 @@ define i32 @lshr_mul_times_5_div_4(i32 %0) {
 
 define i32 @lshr_mul_times_5_div_4_exact(i32 %x) {
 ; CHECK-LABEL: @lshr_mul_times_5_div_4_exact(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw i32 %x, 5
@@ -718,8 +718,8 @@ define i32 @mul_times_5_div_4_multiuse_lshr(i32 %x) {
 
 define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
 ; CHECK-LABEL: @lshr_mul_times_5_div_4_exact_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 5
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nuw i32 %x, 5
@@ -729,8 +729,8 @@ define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) {
 
 define i32 @ashr_mul_times_3_div_2(i32 %0) {
 ; CHECK-LABEL: @ashr_mul_times_3_div_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nuw nsw i32 %0, 3
@@ -740,8 +740,8 @@ define i32 @ashr_mul_times_3_div_2(i32 %0) {
 
 define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
 ; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw i32 %x, 3
@@ -792,8 +792,8 @@ define i32 @mul_times_3_div_2_multiuse_ashr(i32 %x) {
 
 define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
 ; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw i32 %x, 3
@@ -803,8 +803,8 @@ define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
 
 define i32 @ashr_mul_times_5_div_4(i32 %0) {
 ; CHECK-LABEL: @ashr_mul_times_5_div_4(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 5
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 2
+; CHECK-NEXT:    [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nuw nsw i32 %0, 5
@@ -814,8 +814,8 @@ define i32 @ashr_mul_times_5_div_4(i32 %0) {
 
 define i32 @ashr_mul_times_5_div_4_exact(i32 %x) {
 ; CHECK-LABEL: @ashr_mul_times_5_div_4_exact(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw i32 %x, 5
@@ -853,8 +853,8 @@ define i32 @mul_times_5_div_4_multiuse_ashr(i32 %x) {
 
 define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
 ; CHECK-LABEL: @ashr_mul_times_5_div_4_exact_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 5
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw i32 %x, 5
@@ -862,4 +862,26 @@ define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) {
   ret i32 %ashr
 }
 
+define i32 @mul_splat_fold_known_active_bits(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_known_active_bits(
+; CHECK-NEXT:    [[XX:%.*]] = and i32 [[X:%.*]], 360
+; CHECK-NEXT:    ret i32 [[XX]]
+;
+  %xx = and i32 %x, 360
+  %m = mul nuw i32 %xx, 65537
+  %t = ashr i32 %m, 16
+  ret i32 %t
+}
+
+define i32 @mul_splat_fold_no_known_active_bits(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_no_known_active_bits(
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 16
+; CHECK-NEXT:    [[T:%.*]] = add nsw i32 [[TMP1]], [[X]]
+; CHECK-NEXT:    ret i32 [[T]]
+;
+  %m = mul nsw i32 %x, 65537
+  %t = ashr i32 %m, 16
+  ret i32 %t
+}
+
 declare void @use(i32)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index f2f737b7c503f..17b08985ee90e 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -348,22 +348,31 @@ define <2 x i32> @narrow_lshr_constant(<2 x i8> %x, <2 x i8> %y) {
 
 define i32 @mul_splat_fold(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold(
-; CHECK-NEXT:    [[T:%.*]] = and i32 [[X:%.*]], 65535
-; CHECK-NEXT:    ret i32 [[T]]
+; CHECK-NEXT:    ret i32 [[X:%.*]]
 ;
   %m = mul nuw i32 %x, 65537
   %t = lshr i32 %m, 16
   ret i32 %t
 }
 
+define i32 @mul_splat_fold_known_zeros(i32 %x) {
+; CHECK-LABEL: @mul_splat_fold_known_zeros(
+; CHECK-NEXT:    [[XX:%.*]] = and i32 [[X:%.*]], 360
+; CHECK-NEXT:    ret i32 [[XX]]
+;
+  %xx = and i32 %x, 360
+  %m = mul nuw i32 %xx, 65537
+  %t = lshr i32 %m, 16
+  ret i32 %t
+}
+
 ; Vector type, extra use, weird types are all ok.
 
 define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
 ; CHECK-LABEL: @mul_splat_fold_vec(
 ; CHECK-NEXT:    [[M:%.*]] = mul nuw <3 x i14> [[X:%.*]], <i14 129, i14 129, i14 129>
 ; CHECK-NEXT:    call void @usevec(<3 x i14> [[M]])
-; CHECK-NEXT:    [[T:%.*]] = and <3 x i14> [[X]], <i14 127, i14 127, i14 127>
-; CHECK-NEXT:    ret <3 x i14> [[T]]
+; CHECK-NEXT:    ret <3 x i14> [[X]]
 ;
   %m = mul nuw <3 x i14> %x, <i14 129, i14 129, i14 129>
   call void @usevec(<3 x i14> %m)
@@ -628,12 +637,10 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
   ret i32 %t
 }
 
-; Negative test (but simplifies into a different transform)
-
 define i32 @mul_splat_fold_no_nuw(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_nuw(
-; CHECK-NEXT:    [[M:%.*]] = mul nsw i32 [[X:%.*]], 65537
-; CHECK-NEXT:    [[T:%.*]] = lshr i32 [[M]], 16
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 16
+; CHECK-NEXT:    [[T:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[T]]
 ;
   %m = mul nsw i32 %x, 65537
@@ -641,7 +648,7 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
   ret i32 %t
 }
 
-; Negative test 
+; Negative test
 
 define i32 @mul_splat_fold_no_flags(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_flags(

From 568b990c053dd083c8d18b4b1360a8751d9ec3da Mon Sep 17 00:00:00 2001
From: AtariDreams <gfunni234 at gmail.com>
Date: Wed, 29 May 2024 17:13:09 -0400
Subject: [PATCH 3/4] [InstCombine] Remove the known-bits early return from the
 lshr fold

---
 llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 6a21704c57a52..94a15349ed413 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1463,18 +1463,12 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
     if (match(Op0, m_NUWMul(m_Value(X), m_APInt(MulC)))) {
       if (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
           MulC->logBase2() == ShAmtC) {
-
         // Look for a "splat" mul pattern - it replicates bits across each half
         // of a value, so a right shift is just a mask of the low bits:
         // lshr i[2N] (mul nuw X, (2^N)+1), N --> X
         if (ShAmtC * 2 == BitWidth)
           return replaceInstUsesWith(I, X);
 
-        KnownBits KnownX =
-            computeKnownBits(X, /* Depth */ 0, cast<Instruction>(Op0));
-        if (KnownX.countMaxActiveBits() <= ShAmtC)
-          return replaceInstUsesWith(I, X);
-
         // lshr (mul nuw (X, 2^N + 1)), N -> add nuw (X, lshr(X, N))
         if (Op0->hasOneUse()) {
           auto *NewAdd = BinaryOperator::CreateNUWAdd(

From 728eac99dcf78de7969fddb3e5a8f56345e680fc Mon Sep 17 00:00:00 2001
From: AtariDreams <gfunni234 at gmail.com>
Date: Wed, 29 May 2024 17:15:13 -0400
Subject: [PATCH 4/4] [InstCombine] Require one-use and drop the known-bits
 early return in the ashr fold

---
 llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 94a15349ed413..266ccf9d19e6a 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1713,25 +1713,16 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
     }
 
     const APInt *MulC;
-    if (match(Op0, m_NSWMul(m_Value(X), m_APInt(MulC))) &&
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_APInt(MulC)))) &&
         (BitWidth > 2 && (*MulC - 1).isPowerOf2() &&
          MulC->logBase2() == ShAmt &&
          (ShAmt < BitWidth - 1))) /* Minus 1 for the sign bit */ {
-
-      // ashr (mul nsw (X, 2^N + 1)), N -> add nsw (X, ashr(X, N))
-      KnownBits KnownX =
-          computeKnownBits(X, /* Depth */ 0, cast<Instruction>(Op0));
-      if (KnownX.countMaxActiveBits() <= ShAmt)
-        return replaceInstUsesWith(I, X);
-
-      if (Op0->hasOneUse()) {
-        auto *NewAdd = BinaryOperator::CreateNSWAdd(
-            X, Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "",
-                                  I.isExact()));
-        NewAdd->setHasNoUnsignedWrap(
-            cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
-        return NewAdd;
-      }
+      auto *NewAdd = BinaryOperator::CreateNSWAdd(
+          X, Builder.CreateAShr(X, ConstantInt::get(Ty, ShAmt), "",
+                                I.isExact()));
+      NewAdd->setHasNoUnsignedWrap(
+          cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap());
+      return NewAdd;
     }
   }
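
The signed variant rests on the same algebra: under nsw,
X * (2^N + 1) == (X << N) + X, and ashr divides by 2^N rounding toward
negative infinity, so ashr (mul nsw X, 2^N + 1), N == X + ashr(X, N) even
for negative X. An illustrative sketch with N = 1 (not one of the
committed tests):

  %mul = mul nsw i32 %x, 3
  %res = ashr i32 %mul, 1      ; floor(3 * X / 2); X = -5 gives -8

becomes

  %tmp = ashr i32 %x, 1        ; X = -5 gives -3
  %res = add nsw i32 %tmp, %x  ; -5 + (-3) = -8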
 



More information about the llvm-commits mailing list