[llvm] 61af2ab - [InstCombine] fold pow(X, Y) * X -> pow(X, Y+1) (with fast-math)
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 13 14:23:15 PST 2023
Author: Sanjay Patel
Date: 2023-01-13T17:13:46-05:00
New Revision: 61af2ab68142729cbe34c41c8ee8e6f38759c4ed
URL: https://github.com/llvm/llvm-project/commit/61af2ab68142729cbe34c41c8ee8e6f38759c4ed
DIFF: https://github.com/llvm/llvm-project/commit/61af2ab68142729cbe34c41c8ee8e6f38759c4ed.diff
LOG: [InstCombine] fold pow(X,Y) * X -> pow(X, Y+1) (with fast-math)
This is one of the patterns suggested in issue #34943.
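For context, a minimal C++ sketch (not part of the commit; the function names here are hypothetical) of the source-level pattern this fold targets. When compiled with -ffast-math, the fmul carries the reassoc flag, so InstCombine can rewrite the first form into the second:

    #include <cmath>
    #include <cstdio>

    // Candidate pattern: multiply the result of pow(x, y) by x again.
    double pow_times_x(double x, double y) { return std::pow(x, y) * x; }

    // What the fold produces: a single pow call with the exponent bumped by 1.
    double pow_plus_one(double x, double y) { return std::pow(x, y + 1.0); }

    int main() {
      // Both print 16.000000 for x=2, y=3 (2^3 * 2 == 2^4).
      std::printf("%f %f\n", pow_times_x(2.0, 3.0), pow_plus_one(2.0, 3.0));
      return 0;
    }

The fold only fires when the multiply is reassociable and the pow call has no other uses, matching the reassoc and m_OneUse requirements in the patch below.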
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
llvm/test/Transforms/InstCombine/fmul-pow.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 6ce3939d75126..d685dd0b7bc01 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -661,6 +661,17 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
}
}
+ // pow(X, Y) * X --> pow(X, Y+1)
+ // X * pow(X, Y) --> pow(X, Y+1)
+ if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::pow>(m_Value(X),
+ m_Value(Y))),
+ m_Deferred(X)))) {
+ Value *Y1 =
+ Builder.CreateFAddFMF(Y, ConstantFP::get(I.getType(), 1.0), &I);
+ Value *Pow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, Y1, &I);
+ return replaceInstUsesWith(I, Pow);
+ }
+
if (I.isOnlyUserOfAnyOperand()) {
// pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
diff --git a/llvm/test/Transforms/InstCombine/fmul-pow.ll b/llvm/test/Transforms/InstCombine/fmul-pow.ll
index e1f31d8148816..598464138414b 100644
--- a/llvm/test/Transforms/InstCombine/fmul-pow.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-pow.ll
@@ -4,6 +4,9 @@
declare double @llvm.pow.f64(double, double)
declare void @use(double)
+; negative test for:
+; pow(a,b) * a --> pow(a, b+1) (requires reassoc)
+
define double @pow_ab_a(double %a, double %b) {
; CHECK-LABEL: @pow_ab_a(
; CHECK-NEXT: [[P:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
@@ -15,10 +18,12 @@ define double @pow_ab_a(double %a, double %b) {
ret double %m
}
+; pow(a,b) * a --> pow(a, b+1)
+
define double @pow_ab_a_reassoc(double %a, double %b) {
; CHECK-LABEL: @pow_ab_a_reassoc(
-; CHECK-NEXT: [[P:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
-; CHECK-NEXT: [[M:%.*]] = fmul reassoc double [[P]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc double [[B:%.*]], 1.000000e+00
+; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
%p = call double @llvm.pow.f64(double %a, double %b)
@@ -26,11 +31,13 @@ define double @pow_ab_a_reassoc(double %a, double %b) {
ret double %m
}
+; a * pow(a,b) --> pow(a, b+1)
+
define double @pow_ab_a_reassoc_commute(double %pa, double %b) {
; CHECK-LABEL: @pow_ab_a_reassoc_commute(
; CHECK-NEXT: [[A:%.*]] = fadd double [[PA:%.*]], 4.200000e+01
-; CHECK-NEXT: [[P:%.*]] = call double @llvm.pow.f64(double [[A]], double [[B:%.*]])
-; CHECK-NEXT: [[M:%.*]] = fmul reassoc double [[A]], [[P]]
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc double [[B:%.*]], 1.000000e+00
+; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
%a = fadd double %pa, 42.0 ; thwart complexity-based canonicalization
@@ -39,6 +46,8 @@ define double @pow_ab_a_reassoc_commute(double %pa, double %b) {
ret double %m
}
+; negative test - extra uses not allowed
+
define double @pow_ab_a_reassoc_use(double %a, double %b) {
; CHECK-LABEL: @pow_ab_a_reassoc_use(
; CHECK-NEXT: [[P:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])