[llvm] 7736c19 - [InstCombine] Missed optimization for pow(x, y) * pow(x, z) with fast-math

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 7 05:10:37 PDT 2021


Author: Daniil Seredkin
Date: 2021-06-07T08:08:05-04:00
New Revision: 7736c1936a93d7effecf32ef5193dfc00b732cd2

URL: https://github.com/llvm/llvm-project/commit/7736c1936a93d7effecf32ef5193dfc00b732cd2
DIFF: https://github.com/llvm/llvm-project/commit/7736c1936a93d7effecf32ef5193dfc00b732cd2.diff

LOG: [InstCombine] Missed optimization for pow(x, y) * pow(x, z) with fast-math

If FP reassociation (fast-math) is allowed, then LLVM is free to perform
the following transformation: pow(x, y) * pow(x, z) -> pow(x, y + z).
This patch adds that transformation along with tests for it.
For more details, see https://bugs.llvm.org/show_bug.cgi?id=47205
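
For context, here is a source-level sketch of the pattern this fold
targets (the function name is hypothetical). Built with clang -O2
-ffast-math, the pow calls and the multiply should carry the 'reassoc'
fast-math flag that the fold requires:

  #include <cmath>

  // With -ffast-math, InstCombine may rewrite the body into a single
  // call equivalent to std::pow(x, y + z).
  float powmul(float x, float y, float z) {
    return std::pow(x, y) * std::pow(x, z);
  }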

It handles two cases:

1. When the operands of the fmul are different instructions:

%4 = call reassoc float @llvm.pow.f32(float %0, float %1)
%5 = call reassoc float @llvm.pow.f32(float %0, float %2)
%6 = fmul reassoc float %5, %4
-->
%3 = fadd reassoc float %1, %2
%4 = call reassoc float @llvm.pow.f32(float %0, float %3)

2. When the operands of the fmul are the same instruction:

%4 = call reassoc float @llvm.pow.f32(float %0, float %1)
%5 = fmul reassoc float %4, %4
-->
%3 = fadd reassoc float %1, %1
%4 = call reassoc float @llvm.pow.f32(float %0, float %3)
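
The reassoc flag is required because the two forms are not bit-identical
in general: two pow calls plus a multiply round differently than one add
plus one pow. A small sketch that makes this observable (the inputs are
arbitrary; the two results may agree or differ in the last bits,
depending on the libm implementation):

  #include <cmath>
  #include <cstdio>

  int main() {
    float x = 1.3f, y = 0.7f, z = 2.9f;
    float a = std::pow(x, y) * std::pow(x, z); // two pow roundings + fmul
    float b = std::pow(x, y + z);              // one add + one pow
    std::printf("%a\n%a\n", a, b);             // compare as hex floats
    return 0;
  }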

Differential Revision: https://reviews.llvm.org/D102574

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
    llvm/test/Transforms/InstCombine/fmul-pow.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index bb962e639c61e..24796c3ccc1ca 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -555,22 +555,30 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
       }
     }
 
-    // exp(X) * exp(Y) -> exp(X + Y)
-    if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
-        match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
-        I.isOnlyUserOfAnyOperand()) {
-      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
-      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
-      return replaceInstUsesWith(I, Exp);
-    }
+    if (I.isOnlyUserOfAnyOperand()) {
+      // pow(x, y) * pow(x, z) -> pow(x, y + z)
+      if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
+          match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
+        auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
+        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
+        return replaceInstUsesWith(I, NewPow);
+      }
 
-    // exp2(X) * exp2(Y) -> exp2(X + Y)
-    if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
-        match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
-        I.isOnlyUserOfAnyOperand()) {
-      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
-      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
-      return replaceInstUsesWith(I, Exp2);
+      // exp(X) * exp(Y) -> exp(X + Y)
+      if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
+          match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
+        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
+        Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
+        return replaceInstUsesWith(I, Exp);
+      }
+
+      // exp2(X) * exp2(Y) -> exp2(X + Y)
+      if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
+          match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
+        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
+        Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
+        return replaceInstUsesWith(I, Exp2);
+      }
     }
 
     // (X*Y) * X => (X*X) * Y where Y != X
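
Note that the I.isOnlyUserOfAnyOperand() guard, previously repeated in
each fold, is hoisted so that it now gates the pow, exp, and exp2 folds
alike. Roughly, it requires the fmul to be the only user of at least one
of its operands, so that operand's defining call dies after the rewrite
rather than leaving both the old and the new intrinsic calls live. A
sketch of its semantics (a paraphrase, not the actual LLVM source):

  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  // True if this instruction is the sole user of at least one of its
  // operands (paraphrasing Instruction::isOnlyUserOfAnyOperand).
  bool isOnlyUserOfAnyOperandSketch(const Instruction &I) {
    for (const Value *Op : I.operands())
      if (Op->hasOneUser()) // I must be that single user
        return true;
    return false;
  }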

diff --git a/llvm/test/Transforms/InstCombine/fmul-pow.ll b/llvm/test/Transforms/InstCombine/fmul-pow.ll
index 6e23f7764c602..70741554ac134 100644
--- a/llvm/test/Transforms/InstCombine/fmul-pow.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-pow.ll
@@ -79,10 +79,9 @@ define double @pow_ab_pow_ac(double %a, double %b, double %c) {
 
 define double @pow_ab_x_pow_ac_reassoc(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_x_pow_ac_reassoc(
-; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.pow.f64(double [[A]], double [[C:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP2]], [[TMP1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd reassoc double [[C:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
+; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.pow.f64(double %a, double %b)
   %2 = call double @llvm.pow.f64(double %a, double %c)
@@ -90,12 +89,11 @@ define double @pow_ab_x_pow_ac_reassoc(double %a, double %b, double %c) {
   ret double %mul
 }
 
-
 define double @pow_ab_reassoc(double %a, double %b) {
 ; CHECK-LABEL: @pow_ab_reassoc(
-; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP1]], [[TMP1]]
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd reassoc double [[B:%.*]], [[B]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
+; CHECK-NEXT:    ret double [[TMP2]]
 ;
   %1 = call double @llvm.pow.f64(double %a, double %b)
   %mul = fmul reassoc double %1, %1
@@ -118,10 +116,10 @@ define double @pow_ab_reassoc_extra_use(double %a, double %b) {
 define double @pow_ab_x_pow_ac_reassoc_extra_use(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_x_pow_ac_reassoc_extra_use(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.pow.f64(double [[A]], double [[C:%.*]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP2:%.*]] = fadd reassoc double [[B]], [[C:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP2]])
 ; CHECK-NEXT:    call void @use(double [[TMP1]])
-; CHECK-NEXT:    ret double [[MUL]]
+; CHECK-NEXT:    ret double [[TMP3]]
 ;
   %1 = call double @llvm.pow.f64(double %a, double %b)
   %2 = call double @llvm.pow.f64(double %a, double %c)


        

