[llvm] [InstCombine] optimize powi(X,Y) * X with Ofast (PR #69998)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 6 17:19:08 PST 2024
https://github.com/vfdff updated https://github.com/llvm/llvm-project/pull/69998
From 0a480d454d2b2044b4626bcebfca3902f0c5c047 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Mon, 23 Oct 2023 09:19:54 -0400
Subject: [PATCH 1/3] [InstCombine] optimize powi(X,Y) * X with Ofast
Try to transform powi(X, Y) * X into powi(X, Y+1) under -Ofast.
For example, when Y is 3, the resulting powi(X, 4) is later expanded
into X2 = X * X; X2 * X2.
Similar to D109954, which requires the reassoc flag.
Fixes https://github.com/llvm/llvm-project/issues/69862.
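For illustration, a hypothetical C++ reproducer (not taken verbatim from the
linked issue) that exhibits the pattern when compiled with clang -Ofast:

// Hypothetical reproducer. Under -Ofast the fmul carries the reassoc flag,
// so powi(x, 3) * x folds into powi(x, 4), which is in turn expanded as
// x2 = x * x; x2 * x2 -- two multiplies instead of three.
double pow4(double x) {
  return __builtin_powi(x, 3) * x;
}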
---
.../InstCombine/InstCombineMulDivRem.cpp | 12 +++++
llvm/test/Transforms/InstCombine/powi.ll | 49 +++++++++++++++++++
2 files changed, 61 insertions(+)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 0bd4b6d1a835af..ae31e95220ce93 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -683,6 +683,18 @@ Instruction *InstCombinerImpl::foldFMulReassoc(BinaryOperator &I) {
return replaceInstUsesWith(I, Pow);
}
+ // powi(X, Y) * X --> powi(X, Y+1)
+ // X * powi(X, Y) --> powi(X, Y+1)
+ if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Value(X),
+ m_Value(Y))),
+ m_Deferred(X))) &&
+ willNotOverflowSignedAdd(Y, ConstantInt::get(Y->getType(), 1), I)) {
+ auto *Y1 = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1));
+ auto *NewPow = Builder.CreateIntrinsic(
+ Intrinsic::powi, {X->getType(), Y1->getType()}, {X, Y1}, &I);
+ return replaceInstUsesWith(I, NewPow);
+ }
+
if (I.isOnlyUserOfAnyOperand()) {
// pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 89efbb6f453611..95722d09a17ad3 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -341,3 +341,52 @@ define double @fdiv_pow_powi_negative_variable(double %x, i32 %y) {
%div = fdiv reassoc nnan double %p1, %x
ret double %div
}
+
+; powi(X, Y) * X --> powi(X, Y+1)
+define double @powi_fmul_powi_x(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x(
+; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
+
+; Negative test: Multi-use
+define double @powi_fmul_powi_x_multi_use(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_multi_use(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
+; CHECK-NEXT: tail call void @use(double [[P1]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ tail call void @use(double %p1)
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
+
+; Negative test: Missing reassoc flag on fmul
+define double @powi_fmul_powi_x_missing_reassoc(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_missing_reassoc(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 3)
+; CHECK-NEXT: [[MUL:%.*]] = fmul double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ %mul = fmul double %p1, %x
+ ret double %mul
+}
+
+; Negative test: overflow
+define double @powi_fmul_powi_x_overflow(double noundef %x) {
+; CHECK-LABEL: @powi_fmul_powi_x_overflow(
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 2147483647)
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P1]], [[X]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 2147483647) ; INT_MAX
+ %mul = fmul reassoc double %p1, %x
+ ret double %mul
+}
From 1f93f62edee9339bd8a09e8a02ac951c85eaacf0 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Fri, 1 Mar 2024 20:33:54 -0500
Subject: [PATCH 2/3] [InstCombine] create a helper function foldPowiReassoc,
NFC
---
.../InstCombine/InstCombineInternal.h | 1 +
.../InstCombine/InstCombineMulDivRem.cpp | 61 ++++++++++++-------
2 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 0b4283bc37650a..9487ab4b1e6872 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -98,6 +98,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
Instruction *visitSub(BinaryOperator &I);
Instruction *visitFSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
+ Instruction *foldPowiReassoc(BinaryOperator &I);
Instruction *foldFMulReassoc(BinaryOperator &I);
Instruction *visitFMul(BinaryOperator &I);
Instruction *visitURem(BinaryOperator &I);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index ae31e95220ce93..80179a0062cf21 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -571,6 +571,44 @@ Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
return nullptr;
}
+Instruction *InstCombinerImpl::foldPowiReassoc(BinaryOperator &I) {
+ Value *X, *Y, *Z;
+ auto createPowiExpr = [](BinaryOperator &I, InstCombinerImpl &IC, Value *X,
+ Value *Y, Value *Z) {
+ Value *YZ;
+ InstCombiner::BuilderTy &Builder = IC.Builder;
+
+ if (auto *C = dyn_cast<ConstantInt>(Z)) {
+ if (C->isOne())
+ YZ = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1));
+ } else
+ YZ = Builder.CreateAdd(Y, Z);
+
+ auto *NewPow = Builder.CreateIntrinsic(
+ Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
+ return IC.replaceInstUsesWith(I, NewPow);
+ };
+
+ // powi(X, Y) * X --> powi(X, Y+1)
+ // X * powi(X, Y) --> powi(X, Y+1)
+ if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Value(X),
+ m_Value(Y))),
+ m_Deferred(X))) &&
+ willNotOverflowSignedAdd(Y, ConstantInt::get(Y->getType(), 1), I))
+ return createPowiExpr(I, *this, X, Y, ConstantInt::get(Y->getType(), 1));
+
+ // powi(x, y) * powi(x, z) -> powi(x, y + z)
+ Value *Op0 = I.getOperand(0);
+ Value *Op1 = I.getOperand(1);
+ if (I.isOnlyUserOfAnyOperand() &&
+ match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
+ match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
+ Y->getType() == Z->getType())
+ return createPowiExpr(I, *this, X, Y, Z);
+
+ return nullptr;
+}
+
Instruction *InstCombinerImpl::foldFMulReassoc(BinaryOperator &I) {
Value *Op0 = I.getOperand(0);
Value *Op1 = I.getOperand(1);
@@ -683,17 +721,8 @@ Instruction *InstCombinerImpl::foldFMulReassoc(BinaryOperator &I) {
return replaceInstUsesWith(I, Pow);
}
- // powi(X, Y) * X --> powi(X, Y+1)
- // X * powi(X, Y) --> powi(X, Y+1)
- if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Value(X),
- m_Value(Y))),
- m_Deferred(X))) &&
- willNotOverflowSignedAdd(Y, ConstantInt::get(Y->getType(), 1), I)) {
- auto *Y1 = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1));
- auto *NewPow = Builder.CreateIntrinsic(
- Intrinsic::powi, {X->getType(), Y1->getType()}, {X, Y1}, &I);
- return replaceInstUsesWith(I, NewPow);
- }
+ if (Instruction *FoldedPowi = foldPowiReassoc(I))
+ return FoldedPowi;
if (I.isOnlyUserOfAnyOperand()) {
// pow(X, Y) * pow(X, Z) -> pow(X, Y + Z)
@@ -711,16 +740,6 @@ Instruction *InstCombinerImpl::foldFMulReassoc(BinaryOperator &I) {
return replaceInstUsesWith(I, NewPow);
}
- // powi(x, y) * powi(x, z) -> powi(x, y + z)
- if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
- match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
- Y->getType() == Z->getType()) {
- auto *YZ = Builder.CreateAdd(Y, Z);
- auto *NewPow = Builder.CreateIntrinsic(
- Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
- return replaceInstUsesWith(I, NewPow);
- }
-
// exp(X) * exp(Y) -> exp(X + Y)
if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
From b71285c1f8640ab56f86bf82bf66ad0a795de728 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Sat, 2 Mar 2024 01:30:08 -0500
Subject: [PATCH 3/3] [InstCombine] Require the reassoc flag on the operands
 of fmul
According to the review discussion, not only the fmul itself but also all
of its operands must carry the reassoc flag.
Add a new matcher, m_AllowReassoc, to check for the reassoc flag.
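A minimal usage sketch of the new matcher (the wrapper function name is
hypothetical; the match expression mirrors the updated foldPowiReassoc below):

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Returns true iff I is powi(X, Y) * X (in either operand order) where the
// powi call itself carries the reassoc flag and has exactly one use.
static bool isReassocPowiTimesX(BinaryOperator &I, Value *&X, Value *&Y) {
  return match(&I, m_c_FMul(m_AllowReassoc(m_OneUse(
                                m_Intrinsic<Intrinsic::powi>(m_Value(X),
                                                             m_Value(Y)))),
                            m_Deferred(X)));
}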
---
llvm/include/llvm/IR/PatternMatch.h | 16 ++++
.../InstCombine/InstCombineMulDivRem.cpp | 30 +++----
llvm/test/Transforms/InstCombine/powi.ll | 90 ++++++++++++++-----
3 files changed, 99 insertions(+), 37 deletions(-)
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index fed552414298ad..108895dada03b1 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -68,6 +68,22 @@ template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
return SubPattern;
}
+template <typename SubPattern_t> struct AllowReassoc_match {
+ SubPattern_t SubPattern;
+
+ AllowReassoc_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ auto *I = dyn_cast<FPMathOperator>(V);
+ return I && I->hasAllowReassoc() && SubPattern.match(I);
+ }
+};
+
+template <typename T>
+inline AllowReassoc_match<T> m_AllowReassoc(const T &SubPattern) {
+ return SubPattern;
+}
+
template <typename Class> struct class_match {
template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
};
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 80179a0062cf21..29a704320c75e7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -572,37 +572,35 @@ Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
}
Instruction *InstCombinerImpl::foldPowiReassoc(BinaryOperator &I) {
- Value *X, *Y, *Z;
auto createPowiExpr = [](BinaryOperator &I, InstCombinerImpl &IC, Value *X,
Value *Y, Value *Z) {
- Value *YZ;
InstCombiner::BuilderTy &Builder = IC.Builder;
-
- if (auto *C = dyn_cast<ConstantInt>(Z)) {
- if (C->isOne())
- YZ = Builder.CreateAdd(Y, ConstantInt::get(Y->getType(), 1));
- } else
- YZ = Builder.CreateAdd(Y, Z);
-
+ Value *YZ = Builder.CreateAdd(Y, Z);
auto *NewPow = Builder.CreateIntrinsic(
Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
return IC.replaceInstUsesWith(I, NewPow);
};
+ Value *X, *Y, *Z;
+
// powi(X, Y) * X --> powi(X, Y+1)
// X * powi(X, Y) --> powi(X, Y+1)
- if (match(&I, m_c_FMul(m_OneUse(m_Intrinsic<Intrinsic::powi>(m_Value(X),
- m_Value(Y))),
- m_Deferred(X))) &&
- willNotOverflowSignedAdd(Y, ConstantInt::get(Y->getType(), 1), I))
- return createPowiExpr(I, *this, X, Y, ConstantInt::get(Y->getType(), 1));
+ if (match(&I, m_c_FMul(m_AllowReassoc(m_OneUse(m_Intrinsic<Intrinsic::powi>(
+ m_Value(X), m_Value(Y)))),
+ m_Deferred(X)))) {
+ Constant *One = ConstantInt::get(Y->getType(), 1);
+ if (willNotOverflowSignedAdd(Y, One, I))
+ return createPowiExpr(I, *this, X, Y, One);
+ }
// powi(x, y) * powi(x, z) -> powi(x, y + z)
Value *Op0 = I.getOperand(0);
Value *Op1 = I.getOperand(1);
if (I.isOnlyUserOfAnyOperand() &&
- match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
- match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
+ match(Op0, m_AllowReassoc(
+ m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y)))) &&
+ match(Op1, m_AllowReassoc(m_Intrinsic<Intrinsic::powi>(m_Specific(X),
+ m_Value(Z)))) &&
Y->getType() == Z->getType())
return createPowiExpr(I, *this, X, Y, Z);
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 95722d09a17ad3..43e34c889106e1 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -125,22 +125,55 @@ entry:
ret double %mul
}
-define double @powi_fmul_powi_no_reassoc(double %x, i32 %y, i32 %z) {
-; CHECK-LABEL: @powi_fmul_powi_no_reassoc(
+; Negative test: Missing reassoc flag on fmul
+define double @powi_fmul_powi_no_reassoc1(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
-; CHECK-NEXT: [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
; CHECK-NEXT: [[MUL:%.*]] = fmul double [[P2]], [[P1]]
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul double %p2, %p1
ret double %mul
}
+; Negative test: Missing reassoc flag on 2nd operand
+define double @powi_fmul_powi_no_reassoc2(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+; Negative test: Missing reassoc flag on 1st operand
+define double @powi_fmul_powi_no_reassoc3(double %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @powi_fmul_powi_no_reassoc3(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+
+; Both the fmul and all of its operands have the reassoc flag
define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi(
; CHECK-NEXT: entry:
@@ -149,8 +182,8 @@ define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -163,8 +196,8 @@ define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call fast double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul fast double %p2, %p1
ret double %mul
}
@@ -192,8 +225,23 @@ define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %mul = fmul reassoc double %p2, %p1
+ ret double %mul
+}
+
+define double @powi_fmul_powi_different_integer_types(double %x, i32 %y, i16 %z) {
+; CHECK-LABEL: @powi_fmul_powi_different_integer_types(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P2:%.*]] = tail call reassoc double @llvm.powi.f64.i16(double [[X]], i16 [[Z:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
+; CHECK-NEXT: ret double [[MUL]]
+;
+entry:
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i16(double %x, i16 %z)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -201,16 +249,16 @@ entry:
define double @powi_fmul_powi_use_first(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_first(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
tail call void @use(double %p1)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p1, %p2
ret double %mul
}
@@ -218,16 +266,16 @@ entry:
define double @powi_fmul_powi_use_second(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_second(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
tail call void @use(double %p1)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -333,11 +381,11 @@ define double @fdiv_pow_powi_negative(double %x) {
; Negative test: The 2nd powi argument is a variable
define double @fdiv_pow_powi_negative_variable(double %x, i32 %y) {
; CHECK-LABEL: @fdiv_pow_powi_negative_variable(
-; CHECK-NEXT: [[P1:%.*]] = call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[DIV:%.*]] = fdiv reassoc nnan double [[P1]], [[X]]
; CHECK-NEXT: ret double [[DIV]]
;
- %p1 = call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
%div = fdiv reassoc nnan double %p1, %x
ret double %div
}
@@ -348,7 +396,7 @@ define double @powi_fmul_powi_x(double noundef %x) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 4)
; CHECK-NEXT: ret double [[MUL]]
;
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 3)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 3)
%mul = fmul reassoc double %p1, %x
ret double %mul
}