[llvm] [InstCombine] Fold adds + shifts with nsw and nuw flags (PR #88193)

via llvm-commits llvm-commits at lists.llvm.org
Sun Apr 21 16:37:53 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/88193

From ef0ebd3bc56ebf7b201d54bdcab7630b30166fd4 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 21 Apr 2024 17:44:48 -0400
Subject: [PATCH 1/2] [InstCombine] Pre-commit tests (NFC)

---
 llvm/test/Transforms/InstCombine/ashr-lshr.ll |  48 ++++++++
 llvm/test/Transforms/InstCombine/lshr.ll      | 104 +++++++++++++++++-
 2 files changed, 148 insertions(+), 4 deletions(-)

diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index ac206dc7999dd2..7dd62327521081 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -604,3 +604,51 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
   %r = ashr exact <2 x i8> %p, %y
   ret <2 x i8> %r
 }
+
+define i32 @ashr_mul_times_3_div_2(i32 %0) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw nuw i32 %0, 3
+  %ashr = ashr i32 %mul, 1
+  ret i32 %ashr
+}
+
+define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nsw i32 %x, 3
+  %ashr = ashr exact i32 %mul, 1
+  ret i32 %ashr
+}
+
+define i32 @mul_times_3_div_2_multiuse(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[RES:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nuw i32 %x, 3
+  %res = ashr i32 %mul, 1
+  call void @use (i32 %mul)
+  ret i32 %res
+}
+
+define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) {
+; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[ASHR]]
+;
+  %mul = mul nuw i32 %x, 3
+  %ashr = ashr exact i32 %mul, 1
+  ret i32 %ashr
+}
+
+declare void @use(i32)
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 7d611ba188d6b4..384e6e38b144d2 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -360,7 +360,79 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
   ret <3 x i14> %t
 }
 
-; Negative test
+; Negative tests
+
+define i32 @mul_times_3_div_2(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[RES:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nsw nuw i32 %x, 3
+  %res = lshr i32 %mul, 1
+  ret i32 %res
+}
+
+define i32 @shl_add_lshr(i32 %x, i32 %c, i32 %y) {
+; CHECK-LABEL: @shl_add_lshr(
+; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Y:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[ADD]], [[C]]
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %shl = shl nuw i32 %x, %c
+  %add = add nuw nsw i32 %shl, %y
+  %lshr = lshr exact i32 %add, %c
+  ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_3_div_2_nuw(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_nuw(
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nuw i32 %0, 3
+  %lshr = lshr i32 %mul, 1
+  ret i32 %lshr
+}
+
+define i32 @lshr_mul_times_3_div_2_nsw(i32 %0) {
+; CHECK-LABEL: @lshr_mul_times_3_div_2_nsw(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul nsw i32 %0, 3
+  %lshr = lshr i32 %mul, 1
+  ret i32 %lshr
+}
+
+; Negative tests
+
+define i32 @mul_times_3_div_2_no_flag(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_no_flag(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %mul = mul i32 %x, 3
+  %lshr = lshr i32 %mul, 1
+  ret i32 %lshr
+}
+
+define i32 @shl_add_lshr_neg(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @shl_add_lshr_neg(
+; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    [[RES:%.*]] = lshr exact i32 [[ADD]], [[Z]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %shl = shl nuw i32 %x, %y
+  %add = add nuw nsw i32 %shl, %z
+  %res = lshr exact i32 %add, %z
+  ret i32 %res
+}
 
 define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_wrong_mul_const(
@@ -373,7 +445,33 @@ define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
   ret i32 %t
 }
 
-; Negative test
+define i32 @shl_add_lshr_multiuse(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @shl_add_lshr_multiuse(
+; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Z:%.*]]
+; CHECK-NEXT:    call void @use(i32 [[ADD]])
+; CHECK-NEXT:    [[RES:%.*]] = lshr exact i32 [[ADD]], [[Z]]
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %shl = shl nuw i32 %x, %y
+  %add = add nuw nsw i32 %shl, %z
+  call void @use (i32 %add)
+  %res = lshr exact i32 %add, %z
+  ret i32 %res
+}
+
+define i32 @mul_times_3_div_2_multiuse(i32 %x) {
+; CHECK-LABEL: @mul_times_3_div_2_multiuse(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[X:%.*]], 3
+; CHECK-NEXT:    [[RES:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    call void @use(i32 [[MUL]])
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+  %mul = mul nuw i32 %x, 3
+  %res = lshr i32 %mul, 1
+  call void @use (i32 %mul)
+  ret i32 %res
+}
 
 define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_wrong_lshr_const(
@@ -386,8 +484,6 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
   ret i32 %t
 }
 
-; Negative test
-
 define i32 @mul_splat_fold_no_nuw(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_nuw(
 ; CHECK-NEXT:    [[M:%.*]] = mul nsw i32 [[X:%.*]], 65537

From d5194aa4cc19c5ffd302c07bfb581a934d7df4fc Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sun, 21 Apr 2024 17:50:52 -0400
Subject: [PATCH 2/2] [InstCombine] Fold adds + shifts with nsw and nuw flags

I also added the fold of (mul nsw/nuw X, 3) followed by a shift right by 1,
since that is the canonical form of ((X << 1) + X) / 2, a specific expression
that canonicalization otherwise causes InstCombine to miss.

Proofs:
https://alive2.llvm.org/ce/z/kDVTiL
https://alive2.llvm.org/ce/z/wORNYm
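
For reference, a minimal LLVM IR sketch of the two folds (hand-written to
mirror the Alive2 proofs above; the function names are illustrative only):

; With nuw on both the shl and the add, the shift can move past the add:
define i32 @shl_add_lshr_sketch(i32 %x, i32 %y, i32 %z) {
  %shl = shl nuw i32 %x, %z
  %add = add nuw i32 %shl, %y
  %res = lshr i32 %add, %z      ; --> add nuw (lshr i32 %y, %z), %x
  ret i32 %res
}

; (mul nuw nsw X, 3) >> 1 is the canonical form of ((X << 1) + X) / 2:
define i32 @mul3_shr1_sketch(i32 %x) {
  %mul = mul nuw nsw i32 %x, 3
  %res = lshr i32 %mul, 1       ; --> add nuw nsw (lshr i32 %x, 1), %x
  ret i32 %res
}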
---
 .../InstCombine/InstCombineShifts.cpp         | 49 ++++++++++++++++++-
 llvm/test/Transforms/InstCombine/ashr-lshr.ll |  8 +--
 llvm/test/Transforms/InstCombine/lshr.ll      | 19 ++++---
 3 files changed, 62 insertions(+), 14 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 244f03a1bc2b4c..4710ad3ae3b2e6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1267,6 +1267,18 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
     return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);
 
+  // If both the add and the shl are nuw:
+  // ((X << Z) + Y) >>u Z --> X + (Y >>u Z), with the new add also nuw.
+  Value *Y;
+  if (match(Op0, m_OneUse(m_c_NUWAdd(m_NUWShl(m_Value(X), m_Specific(Op1)),
+                                     m_Value(Y))))) {
+    Value *NewLshr = Builder.CreateLShr(Y, Op1, "", I.isExact());
+    auto *NewAdd = BinaryOperator::CreateNUWAdd(NewLshr, X);
+    NewAdd->setHasNoSignedWrap(
+        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap());
+    return NewAdd;
+  }
+
   if (match(Op1, m_APInt(C))) {
     unsigned ShAmtC = C->getZExtValue();
     auto *II = dyn_cast<IntrinsicInst>(Op0);
@@ -1283,7 +1295,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
       return new ZExtInst(Cmp, Ty);
     }
 
-    Value *X;
     const APInt *C1;
     if (match(Op0, m_Shl(m_Value(X), m_APInt(C1))) && C1->ult(BitWidth)) {
       if (C1->ult(ShAmtC)) {
@@ -1328,7 +1339,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
     // ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
     // TODO: Consolidate with the more general transform that starts from shl
     //       (the shifts are in the opposite order).
-    Value *Y;
     if (match(Op0,
               m_OneUse(m_c_Add(m_OneUse(m_Shl(m_Value(X), m_Specific(Op1))),
                                m_Value(Y))))) {
@@ -1450,9 +1460,24 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
           NewMul->setHasNoSignedWrap(true);
           return NewMul;
         }
+
+        // Special case: lshr (mul nuw (X, 3), 1) -> add nuw nsw (X, lshr(X, 1))
+        if (ShAmtC == 1 && MulC->getZExtValue() == 3) {
+          auto *NewAdd = BinaryOperator::CreateNUWAdd(
+              X,
+              Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+          NewAdd->setHasNoSignedWrap(true);
+          return NewAdd;
+        }
       }
     }
 
+    // lshr (mul nsw (X, 3), 1) -> add nsw (X, lshr(X, 1))
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmtC == 1)
+      return BinaryOperator::CreateNSWAdd(
+          X, Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact()));
+
     // Try to narrow bswap.
     // In the case where the shift amount equals the bitwidth difference, the
     // shift is eliminated.
@@ -1656,6 +1681,26 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
       if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
         return new SExtInst(Builder.CreateICmpSLT(X, Y), Ty);
     }
+
+    // Special case: ashr (mul nsw (X, 3), 1) -> add nsw (X, ashr(X, 1))
+    if (match(Op0, m_OneUse(m_NSWMul(m_Value(X), m_SpecificInt(3)))) &&
+        ShAmt == 1) {
+      auto *Op0Bin = cast<OverflowingBinaryOperator>(Op0);
+      // If the mul is also nuw, the product cannot have its sign bit set,
+      // so lshr and ashr agree and we prefer the lshr.
+      Value *Shift;
+      if (Op0Bin->hasNoUnsignedWrap())
+        Shift =
+            Builder.CreateLShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+      else
+        Shift =
+            Builder.CreateAShr(X, ConstantInt::get(Ty, 1), "", I.isExact());
+
+      auto *NewAdd = BinaryOperator::CreateNSWAdd(X, Shift);
+      NewAdd->setHasNoUnsignedWrap(Op0Bin->hasNoUnsignedWrap());
+
+      return NewAdd;
+    }
   }
 
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
index 7dd62327521081..25f53074f4e794 100644
--- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll
@@ -607,8 +607,8 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) {
 
 define i32 @ashr_mul_times_3_div_2(i32 %0) {
 ; CHECK-LABEL: @ashr_mul_times_3_div_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw nuw i32 %0, 3
@@ -618,8 +618,8 @@ define i32 @ashr_mul_times_3_div_2(i32 %0) {
 
 define i32 @ashr_mul_times_3_div_2_exact(i32 %x) {
 ; CHECK-LABEL: @ashr_mul_times_3_div_2_exact(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr exact i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[ASHR]]
 ;
   %mul = mul nsw i32 %x, 3
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 384e6e38b144d2..5604faf8de39b6 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -360,12 +360,12 @@ define <3 x i14> @mul_splat_fold_vec(<3 x i14> %x) {
   ret <3 x i14> %t
 }
 
-; Negative tests
+; Negative test
 
 define i32 @mul_times_3_div_2(i32 %x) {
 ; CHECK-LABEL: @mul_times_3_div_2(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[X:%.*]], 3
-; CHECK-NEXT:    [[RES:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[RES:%.*]] = add nuw nsw i32 [[TMP1]], [[X]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %mul = mul nsw nuw i32 %x, 3
@@ -375,9 +375,8 @@ define i32 @mul_times_3_div_2(i32 %x) {
 
 define i32 @shl_add_lshr(i32 %x, i32 %c, i32 %y) {
 ; CHECK-LABEL: @shl_add_lshr(
-; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[C:%.*]]
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[SHL]], [[Y:%.*]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr exact i32 [[ADD]], [[C]]
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr exact i32 [[Y:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = add nuw nsw i32 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %shl = shl nuw i32 %x, %c
@@ -399,8 +398,8 @@ define i32 @lshr_mul_times_3_div_2_nuw(i32 %0) {
 
 define i32 @lshr_mul_times_3_div_2_nsw(i32 %0) {
 ; CHECK-LABEL: @lshr_mul_times_3_div_2_nsw(
-; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0:%.*]], 3
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[MUL]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = add nsw i32 [[TMP2]], [[TMP0]]
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %mul = mul nsw i32 %0, 3
@@ -445,6 +444,8 @@ define i32 @mul_splat_fold_wrong_mul_const(i32 %x) {
   ret i32 %t
 }
 
+; Negative test
+
 define i32 @shl_add_lshr_multiuse(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @shl_add_lshr_multiuse(
 ; CHECK-NEXT:    [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
@@ -484,6 +485,8 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) {
   ret i32 %t
 }
 
+; Negative test
+
 define i32 @mul_splat_fold_no_nuw(i32 %x) {
 ; CHECK-LABEL: @mul_splat_fold_no_nuw(
 ; CHECK-NEXT:    [[M:%.*]] = mul nsw i32 [[X:%.*]], 65537


