[llvm] ed39621 - [InstCombine] Transform abs pattern using multiplication to abs intrinsic (PR45691)

Dávid Bolvanský via llvm-commits llvm-commits at lists.llvm.org
Sun Jan 17 08:06:26 PST 2021


Author: Dávid Bolvanský
Date: 2021-01-17T17:06:14+01:00
New Revision: ed396212da41feed9bffb8cc1ca6518ab031a3c7

URL: https://github.com/llvm/llvm-project/commit/ed396212da41feed9bffb8cc1ca6518ab031a3c7
DIFF: https://github.com/llvm/llvm-project/commit/ed396212da41feed9bffb8cc1ca6518ab031a3c7.diff

LOG: [InstCombine] Transform abs pattern using multiplication to abs intrinsic (PR45691)

```
unsigned r(int v)
{
    return (1 | -(v < 0)) * v;
}
```

`r` is equivalent to `abs(v)`: `-(v < 0)` is `-1` when `v` is negative and `0` otherwise, so the multiplier `(1 | -(v < 0))` is either `-1` or `1`.
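
For the scalar i32 case, the new fold rewrites the ashr/or/mul form of this pattern (which is what `r` looks like after earlier canonicalizations) into a call to the `llvm.abs` intrinsic. A minimal before/after sketch with illustrative value names; the authoritative CHECK lines are in the updated test file below:

```
; before: multiplication-based abs
%sign = ashr i32 %v, 31        ; -1 if %v is negative, 0 otherwise
%mask = or i32 %sign, 1        ; -1 or 1
%r    = mul nsw i32 %mask, %v  ; negates %v exactly when it is negative

; after InstCombine
%r = call i32 @llvm.abs.i32(i32 %v, i1 true)
```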

```
define <4 x i8> @src(<4 x i8> %0) {
%1:
  %2 = ashr <4 x i8> %0, { 31, undef, 31, 31 }
  %3 = or <4 x i8> %2, { 1, 1, 1, undef }
  %4 = mul nsw <4 x i8> %3, %0
  ret <4 x i8> %4
}
=>
define <4 x i8> @tgt(<4 x i8> %0) {
%1:
  %2 = icmp slt <4 x i8> %0, { 0, 0, 0, 0 }
  %3 = sub nsw <4 x i8> { 0, 0, 0, 0 }, %0
  %4 = select <4 x i1> %2, <4 x i8> %3, <4 x i8> %0
  ret <4 x i8> %4
}
Transformation seems to be correct!
```
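
The second `i1` argument of `llvm.abs` (whether an `INT_MIN` input is poison) is taken from the `nsw` flag of the original `mul`, via `I.hasNoSignedWrap()` in the code below: with `nsw`, multiplying `INT_MIN` by `-1` is already poison, so the flag can be `true`; without `nsw` the fold conservatively passes `false`. An illustrative pair mirroring the first two tests:

```
%i  = ashr i32 %X, 31
%i1 = or i32 %i, 1
%i2 = mul nsw i32 %i1, %X   ; folds to: call i32 @llvm.abs.i32(i32 %X, i1 true)
%j2 = mul i32 %i1, %X       ; folds to: call i32 @llvm.abs.i32(i32 %X, i1 false)
```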

Reviewed By: nikic

Differential Revision: https://reviews.llvm.org/D94874

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
    llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 2da7415b908b..4b485a0ad85e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -153,8 +153,10 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
   if (Value *V = SimplifyUsingDistributiveLaws(I))
     return replaceInstUsesWith(I, V);
 
-  // X * -1 == 0 - X
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+  unsigned BitWidth = I.getType()->getScalarSizeInBits();
+
+  // X * -1 == 0 - X
   if (match(Op1, m_AllOnes())) {
     BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
     if (I.hasNoSignedWrap())
@@ -360,6 +362,19 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
   if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
     return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
 
+  // ((ashr X, 31) | 1) * X --> abs(X)
+  // X * ((ashr X, 31) | 1) --> abs(X)
+  if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
+                                    m_SpecificIntAllowUndef(BitWidth - 1)),
+                             m_One()),
+                        m_Deferred(X)))) {
+    Value *Abs = Builder.CreateBinaryIntrinsic(
+        Intrinsic::abs, X,
+        ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
+    Abs->takeName(&I);
+    return replaceInstUsesWith(I, Abs);
+  }
+
   if (Instruction *Ext = narrowMathIfNoOverflow(I))
     return Ext;
 

diff  --git a/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll b/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
index 1874378f1f1f..ab390a209340 100644
--- a/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
+++ b/llvm/test/Transforms/InstCombine/ashr-or-mul-abs.ll
@@ -6,9 +6,7 @@
 
 define i32 @ashr_or_mul_to_abs(i32 %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul nsw i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 true)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
   %i = ashr i32 %X, 31
@@ -19,9 +17,7 @@ define i32 @ashr_or_mul_to_abs(i32 %X) {
 
 define i32 @ashr_or_mul_to_abs2(i32 %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs2(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
   %i = ashr i32 %X, 31
@@ -30,13 +26,13 @@ define i32 @ashr_or_mul_to_abs2(i32 %X) {
   ret i32 %i2
 }
 
-define i32 @ashr_or_mul_to_abs3(i32 %X) {
+define i32 @ashr_or_mul_to_abs3(i32 %PX) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs3(
-; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
-; CHECK-NEXT:    [[I2:%.*]] = mul i32 [[I1]], [[X]]
+; CHECK-NEXT:    [[X:%.*]] = sdiv i32 42, [[PX:%.*]]
+; CHECK-NEXT:    [[I2:%.*]] = call i32 @llvm.abs.i32(i32 [[X]], i1 false)
 ; CHECK-NEXT:    ret i32 [[I2]]
 ;
+  %X = sdiv i32 42, %PX ; thwart complexity-based canonicalization
   %i = ashr i32 %X, 31
   %i1 = or i32 %i, 1
   %i2 = mul i32 %X, %i1
@@ -46,9 +42,7 @@ define i32 @ashr_or_mul_to_abs3(i32 %X) {
 
 define <4 x i32> @ashr_or_mul_to_abs_vec(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]], <i32 1, i32 1, i32 1, i32 1>
-; CHECK-NEXT:    [[I2:%.*]] = mul <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X, <i32 31, i32 31, i32 31, i32 31>
@@ -59,9 +53,7 @@ define <4 x i32> @ashr_or_mul_to_abs_vec(<4 x i32> %X) {
 
 define <4 x i32> @ashr_or_mul_to_abs_vec2(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec2(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]], <i32 1, i32 1, i32 1, i32 1>
-; CHECK-NEXT:    [[I2:%.*]] = mul nsw <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 true)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X, <i32 31, i32 31, i32 31, i32 31>
@@ -72,9 +64,7 @@ define <4 x i32> @ashr_or_mul_to_abs_vec2(<4 x i32> %X) {
 
 define <4 x i32> @ashr_or_mul_to_abs_vec3_undef(<4 x i32> %X) {
 ; CHECK-LABEL: @ashr_or_mul_to_abs_vec3_undef(
-; CHECK-NEXT:    [[I:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 undef, i32 31, i32 31>
-; CHECK-NEXT:    [[I1:%.*]] = or <4 x i32> [[I]], <i32 1, i32 1, i32 1, i32 undef>
-; CHECK-NEXT:    [[I2:%.*]] = mul <4 x i32> [[I1]], [[X]]
+; CHECK-NEXT:    [[I2:%.*]] = call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[X:%.*]], i1 false)
 ; CHECK-NEXT:    ret <4 x i32> [[I2]]
 ;
   %i = ashr <4 x i32> %X, <i32 31, i32 undef, i32 31, i32 31>
@@ -110,3 +100,16 @@ define i32 @ashr_or_mul_to_abs_neg2(i32 %X) {
   %i2 = mul nsw i32 %i1, %X
   ret i32 %i2
 }
+
+define i32 @ashr_or_mul_to_abs_neg3(i32 %X, i32 %Y) {
+; CHECK-LABEL: @ashr_or_mul_to_abs_neg3(
+; CHECK-NEXT:    [[I:%.*]] = ashr i32 [[X:%.*]], 31
+; CHECK-NEXT:    [[I1:%.*]] = or i32 [[I]], 1
+; CHECK-NEXT:    [[I2:%.*]] = mul nsw i32 [[I1]], [[Y:%.*]]
+; CHECK-NEXT:    ret i32 [[I2]]
+;
+  %i = ashr i32 %X, 31
+  %i1 = or i32 %i, 1
+  %i2 = mul nsw i32 %i1, %Y
+  ret i32 %i2
+}

