[llvm] 914576c - [InstCombine] fold pow(X,Y) * pow(Z,Y) -> pow(X*Z, Y) (with fast-math)

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 13 10:26:20 PST 2023


Author: Sanjay Patel
Date: 2023-01-13T13:26:10-05:00
New Revision: 914576c1f0b66994ca97867676ab6074b9be6f3e

URL: https://github.com/llvm/llvm-project/commit/914576c1f0b66994ca97867676ab6074b9be6f3e
DIFF: https://github.com/llvm/llvm-project/commit/914576c1f0b66994ca97867676ab6074b9be6f3e.diff

LOG: [InstCombine] fold pow(X,Y) * pow(Z,Y) -> pow(X*Z, Y) (with fast-math)

This is one of the patterns suggested in issue #34943.

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
    llvm/test/Transforms/InstCombine/fmul-pow.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 2484b59682e9f..6ce3939d75126 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -662,13 +662,20 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
     }
 
     if (I.isOnlyUserOfAnyOperand()) {
-      // pow(x, y) * pow(x, z) -> pow(x, y + z)
+      // pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
       if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
           match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
         auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
         auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
         return replaceInstUsesWith(I, NewPow);
       }
+      // pow(X, Y) * pow(Z, Y) -> pow(X * Z, Y)
+      if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
+          match(Op1, m_Intrinsic<Intrinsic::pow>(m_Value(Z), m_Specific(Y)))) {
+        auto *XZ = Builder.CreateFMulFMF(X, Z, &I);
+        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, XZ, Y, &I);
+        return replaceInstUsesWith(I, NewPow);
+      }
 
       // powi(x, y) * powi(x, z) -> powi(x, y + z)
       if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&

diff --git a/llvm/test/Transforms/InstCombine/fmul-pow.ll b/llvm/test/Transforms/InstCombine/fmul-pow.ll
index e5226400bb3db..4b51900fc9881 100644
--- a/llvm/test/Transforms/InstCombine/fmul-pow.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-pow.ll
@@ -38,6 +38,9 @@ define double @pow_ab_a_reassoc_commute(double %a, double %b)  {
   ret double %mul
 }
 
+; negative test for:
+; (a**b) * (c**b) --> (a*c) ** b (if mul is reassoc)
+
 define double @pow_ab_pow_cb(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
@@ -51,11 +54,12 @@ define double @pow_ab_pow_cb(double %a, double %b, double %c) {
   ret double %mul
 }
 
+; (a**b) * (c**b) --> (a*c) ** b
+
 define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc(
-; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[TMP2]], [[TMP1]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[C:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B:%.*]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
   %1 = call double @llvm.pow.f64(double %a, double %b)
@@ -64,11 +68,13 @@ define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
   ret double %mul
 }
 
+; (a**b) * (c**b) --> (a*c) ** b
+
 define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use1(
 ; CHECK-NEXT:    [[AB:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[CB:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[AB]], [[CB]]
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[A]], [[C:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
 ; CHECK-NEXT:    call void @use(double [[AB]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
@@ -79,11 +85,13 @@ define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
   ret double %mul
 }
 
+; (a**b) * (c**b) --> (a*c) ** b
+
 define double @pow_ab_pow_cb_reassoc_use2(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use2(
-; CHECK-NEXT:    [[AB:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT:    [[CB:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B]])
-; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc double [[AB]], [[CB]]
+; CHECK-NEXT:    [[CB:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[A:%.*]], [[C]]
+; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
 ; CHECK-NEXT:    call void @use(double [[CB]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
@@ -94,6 +102,8 @@ define double @pow_ab_pow_cb_reassoc_use2(double %a, double %b, double %c) {
   ret double %mul
 }
 
+; negative test - too many extra uses
+
 define double @pow_ab_pow_cb_reassoc_use3(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use3(
 ; CHECK-NEXT:    [[AB:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])


        


More information about the llvm-commits mailing list