[llvm] be02adf - [InstCombine] Fold (x + C1) * (-1<<C2) --> (-C1 - x) * (1<<C2)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 6 13:45:41 PDT 2020


Author: Roman Lebedev
Date: 2020-08-06T23:40:16+03:00
New Revision: be02adfad7acf8040ad025b58052b3838db7e23b

URL: https://github.com/llvm/llvm-project/commit/be02adfad7acf8040ad025b58052b3838db7e23b
DIFF: https://github.com/llvm/llvm-project/commit/be02adfad7acf8040ad025b58052b3838db7e23b.diff

LOG: [InstCombine] Fold  (x + C1) * (-1<<C2)  -->  (-C1 - x) * (1<<C2)

Negator knows how to do this, but the one-use reasoning is getting
a bit muddy here; we don't really want to increase instruction count,
so we need to both lie that this "IsNegation" and have a one-use check
on the outermost LHS value.

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
    llvm/test/Transforms/InstCombine/mul.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index f8d5e8ae64c6..89641704a195 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -236,7 +236,7 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
   if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
     // Interpret  X * (-1<<C)  as  (-X) * (1<<C)  and try to sink the negation.
     // The "* (1<<C)" thus becomes a potential shifting opportunity.
-    if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ false, Op0, *this))
+    if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
       return BinaryOperator::CreateMul(
           NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
   }

diff  --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll
index 480f1ff6038a..921c80440147 100644
--- a/llvm/test/Transforms/InstCombine/mul.ll
+++ b/llvm/test/Transforms/InstCombine/mul.ll
@@ -962,8 +962,8 @@ define <2 x i32> @mulsub2_vec_nonuniform_undef(<2 x i32> %a0) {
 
 define i32 @muladd2(i32 %a0) {
 ; CHECK-LABEL: @muladd2(
-; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[A0:%.*]], -4
-; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[TMP1]], -64
+; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul i32 [[A0:%.*]], -4
+; CHECK-NEXT:    [[MUL:%.*]] = add i32 [[ADD_NEG_NEG]], -64
 ; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %add = add i32 %a0, 16
@@ -973,8 +973,8 @@ define i32 @muladd2(i32 %a0) {
 
 define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
 ; CHECK-LABEL: @muladd2_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
-; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[TMP1]], <i32 -64, i32 -64>
+; CHECK-NEXT:    [[ADD_NEG_NEG:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -4>
+; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[ADD_NEG_NEG]], <i32 -64, i32 -64>
 ; CHECK-NEXT:    ret <2 x i32> [[MUL]]
 ;
   %add = add <2 x i32> %a0, <i32 16, i32 16>
@@ -984,8 +984,8 @@ define <2 x i32> @muladd2_vec(<2 x i32> %a0) {
 
 define <2 x i32> @muladd2_vec_nonuniform(<2 x i32> %a0) {
 ; CHECK-LABEL: @muladd2_vec_nonuniform(
-; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 -8>
-; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[TMP1]], <i32 -64, i32 -256>
+; CHECK-NEXT:    [[ADD_NEG:%.*]] = sub <2 x i32> <i32 -16, i32 -32>, [[A0:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = shl <2 x i32> [[ADD_NEG]], <i32 2, i32 3>
 ; CHECK-NEXT:    ret <2 x i32> [[MUL]]
 ;
   %add = add <2 x i32> %a0, <i32 16, i32 32>
@@ -995,8 +995,8 @@ define <2 x i32> @muladd2_vec_nonuniform(<2 x i32> %a0) {
 
 define <2 x i32> @muladd2_vec_nonuniform_undef(<2 x i32> %a0) {
 ; CHECK-LABEL: @muladd2_vec_nonuniform_undef(
-; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[A0:%.*]], <i32 -4, i32 undef>
-; CHECK-NEXT:    [[MUL:%.*]] = add <2 x i32> [[TMP1]], <i32 -64, i32 0>
+; CHECK-NEXT:    [[ADD_NEG:%.*]] = sub <2 x i32> <i32 -16, i32 -32>, [[A0:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = shl <2 x i32> [[ADD_NEG]], <i32 2, i32 undef>
 ; CHECK-NEXT:    ret <2 x i32> [[MUL]]
 ;
   %add = add <2 x i32> %a0, <i32 16, i32 32>
@@ -1006,9 +1006,9 @@ define <2 x i32> @muladd2_vec_nonuniform_undef(<2 x i32> %a0) {
 
 define i32 @mulmuladd2(i32 %a0, i32 %a1) {
 ; CHECK-LABEL: @mulmuladd2(
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[A0:%.*]], 16
-; CHECK-NEXT:    [[MUL1:%.*]] = mul i32 [[ADD]], [[A1:%.*]]
-; CHECK-NEXT:    [[MUL2:%.*]] = mul i32 [[MUL1]], -4
+; CHECK-NEXT:    [[ADD_NEG:%.*]] = sub i32 -16, [[A0:%.*]]
+; CHECK-NEXT:    [[MUL1_NEG:%.*]] = mul i32 [[ADD_NEG]], [[A1:%.*]]
+; CHECK-NEXT:    [[MUL2:%.*]] = shl i32 [[MUL1_NEG]], 2
 ; CHECK-NEXT:    ret i32 [[MUL2]]
 ;
   %add = add i32 %a0, 16


        


More information about the llvm-commits mailing list