[llvm] [InstCombine] Fix the correctness of missing check reassoc attribute (PR #71277)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 6 23:33:23 PST 2023
https://github.com/vfdff updated https://github.com/llvm/llvm-project/pull/71277
>From 8ab1ea368670ccc52019368e761389e3f6528887 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Mon, 23 Oct 2023 09:19:54 -0400
Subject: [PATCH] [InstCombine] Fix the correctness of missing check reassoc
attribute
The potential issue is based on the discussion of PR69998. The transform is only
reasonable when the instruction I and all of its operands have the reassoc attribute.
IR nodes within a single function may carry different fast-math attributes, e.g. when you're doing LTO.
---
llvm/include/llvm/IR/Instruction.h | 4 ++
llvm/lib/IR/Instruction.cpp | 9 ++++
.../InstCombine/InstCombineMulDivRem.cpp | 2 +-
llvm/test/Transforms/InstCombine/fmul-exp.ll | 20 ++++----
llvm/test/Transforms/InstCombine/fmul-exp2.ll | 20 ++++----
llvm/test/Transforms/InstCombine/fmul-pow.ll | 50 +++++++++----------
llvm/test/Transforms/InstCombine/fmul-sqrt.ll | 12 ++---
llvm/test/Transforms/InstCombine/fmul.ll | 2 +-
llvm/test/Transforms/InstCombine/powi.ll | 22 ++++----
9 files changed, 77 insertions(+), 64 deletions(-)
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index b5ccdf020a4c006..e166cc0e43fda9b 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -416,6 +416,10 @@ class Instruction : public User,
/// instruction.
void setNonNeg(bool b = true);
+ /// Check that every operand of this instruction that is itself an
+ /// instruction has the allow-reassoc fast-math flag set.
+ bool hasAllowReassocOfAllOperand() const LLVM_READONLY;
+
/// Determine whether the no unsigned wrap flag is set.
bool hasNoUnsignedWrap() const LLVM_READONLY;
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 1b3c03348f41a70..895df100f6ff46d 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -177,6 +177,15 @@ void Instruction::setNonNeg(bool b) {
(b * PossiblyNonNegInst::NonNeg);
}
+bool Instruction::hasAllowReassocOfAllOperand() const {
+ return all_of(operands(), [](Value *V) {
+ if (!isa<IntrinsicInst>(V))
+ return true;
+ IntrinsicInst *OptI = cast<IntrinsicInst>(V);
+ return OptI->hasAllowReassoc();
+ });
+}
+
bool Instruction::hasNoUnsignedWrap() const {
return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index db0804380855e3a..d2ac7abc42b9ce6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -781,7 +781,7 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
return replaceInstUsesWith(I, V);
- if (I.hasAllowReassoc())
+ if (I.hasAllowReassoc() && I.hasAllowReassocOfAllOperand())
if (Instruction *FoldedMul = foldFMulReassoc(I))
return FoldedMul;
diff --git a/llvm/test/Transforms/InstCombine/fmul-exp.ll b/llvm/test/Transforms/InstCombine/fmul-exp.ll
index 62d22b8c085c267..16066b5d5bc5168 100644
--- a/llvm/test/Transforms/InstCombine/fmul-exp.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-exp.ll
@@ -21,14 +21,14 @@ define double @exp_a_exp_b(double %a, double %b) {
; exp(a) * exp(b) reassoc, multiple uses
define double @exp_a_exp_b_multiple_uses(double %a, double %b) {
; CHECK-LABEL: @exp_a_exp_b_multiple_uses(
-; CHECK-NEXT: [[T1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]])
+; CHECK-NEXT: [[T1:%.*]] = call reassoc double @llvm.exp.f64(double [[B:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc double [[A:%.*]], [[B]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
; CHECK-NEXT: call void @use(double [[T1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %t = call double @llvm.exp.f64(double %a)
- %t1 = call double @llvm.exp.f64(double %b)
+ %t = call reassoc double @llvm.exp.f64(double %a)
+ %t1 = call reassoc double @llvm.exp.f64(double %b)
%mul = fmul reassoc double %t, %t1
call void @use(double %t1)
ret double %mul
@@ -59,8 +59,8 @@ define double @exp_a_exp_b_reassoc(double %a, double %b) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %t = call double @llvm.exp.f64(double %a)
- %t1 = call double @llvm.exp.f64(double %b)
+ %t = call reassoc double @llvm.exp.f64(double %a)
+ %t1 = call reassoc double @llvm.exp.f64(double %b)
%mul = fmul reassoc double %t, %t1
ret double %mul
}
@@ -71,7 +71,7 @@ define double @exp_a_a(double %a) {
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %t = call double @llvm.exp.f64(double %a)
+ %t = call reassoc double @llvm.exp.f64(double %a)
%m = fmul reassoc double %t, %t
ret double %m
}
@@ -100,12 +100,12 @@ define double @exp_a_exp_b_exp_c_exp_d_fast(double %a, double %b, double %c, dou
; CHECK-NEXT: [[MUL2:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP3]])
; CHECK-NEXT: ret double [[MUL2]]
;
- %t = call double @llvm.exp.f64(double %a)
- %t1 = call double @llvm.exp.f64(double %b)
+ %t = call reassoc double @llvm.exp.f64(double %a)
+ %t1 = call reassoc double @llvm.exp.f64(double %b)
%mul = fmul reassoc double %t, %t1
- %t2 = call double @llvm.exp.f64(double %c)
+ %t2 = call reassoc double @llvm.exp.f64(double %c)
%mul1 = fmul reassoc double %mul, %t2
- %t3 = call double @llvm.exp.f64(double %d)
+ %t3 = call reassoc double @llvm.exp.f64(double %d)
%mul2 = fmul reassoc double %mul1, %t3
ret double %mul2
}
diff --git a/llvm/test/Transforms/InstCombine/fmul-exp2.ll b/llvm/test/Transforms/InstCombine/fmul-exp2.ll
index 35756cc043518f3..80cd13163b800aa 100644
--- a/llvm/test/Transforms/InstCombine/fmul-exp2.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-exp2.ll
@@ -21,14 +21,14 @@ define double @exp2_a_exp2_b(double %a, double %b) {
; exp2(a) * exp2(b) reassoc, multiple uses
define double @exp2_a_exp2_b_multiple_uses(double %a, double %b) {
; CHECK-LABEL: @exp2_a_exp2_b_multiple_uses(
-; CHECK-NEXT: [[T1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]])
+; CHECK-NEXT: [[T1:%.*]] = call reassoc double @llvm.exp2.f64(double [[B:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc double [[A:%.*]], [[B]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
; CHECK-NEXT: call void @use(double [[T1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %t = call double @llvm.exp2.f64(double %a)
- %t1 = call double @llvm.exp2.f64(double %b)
+ %t = call reassoc double @llvm.exp2.f64(double %a)
+ %t1 = call reassoc double @llvm.exp2.f64(double %b)
%mul = fmul reassoc double %t, %t1
call void @use(double %t1)
ret double %mul
@@ -40,7 +40,7 @@ define double @exp2_a_a(double %a) {
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %t = call double @llvm.exp2.f64(double %a)
+ %t = call reassoc double @llvm.exp2.f64(double %a)
%m = fmul reassoc double %t, %t
ret double %m
}
@@ -70,8 +70,8 @@ define double @exp2_a_exp2_b_reassoc(double %a, double %b) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %t = call double @llvm.exp2.f64(double %a)
- %t1 = call double @llvm.exp2.f64(double %b)
+ %t = call reassoc double @llvm.exp2.f64(double %a)
+ %t1 = call reassoc double @llvm.exp2.f64(double %b)
%mul = fmul reassoc double %t, %t1
ret double %mul
}
@@ -85,12 +85,12 @@ define double @exp2_a_exp2_b_exp2_c_exp2_d(double %a, double %b, double %c, doub
; CHECK-NEXT: [[MUL2:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP3]])
; CHECK-NEXT: ret double [[MUL2]]
;
- %t = call double @llvm.exp2.f64(double %a)
- %t1 = call double @llvm.exp2.f64(double %b)
+ %t = call reassoc double @llvm.exp2.f64(double %a)
+ %t1 = call reassoc double @llvm.exp2.f64(double %b)
%mul = fmul reassoc double %t, %t1
- %t2 = call double @llvm.exp2.f64(double %c)
+ %t2 = call reassoc double @llvm.exp2.f64(double %c)
%mul1 = fmul reassoc double %mul, %t2
- %t3 = call double @llvm.exp2.f64(double %d)
+ %t3 = call reassoc double @llvm.exp2.f64(double %d)
%mul2 = fmul reassoc double %mul1, %t3
ret double %mul2
}
diff --git a/llvm/test/Transforms/InstCombine/fmul-pow.ll b/llvm/test/Transforms/InstCombine/fmul-pow.ll
index 63458e136074c90..1d007b174f257b9 100644
--- a/llvm/test/Transforms/InstCombine/fmul-pow.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-pow.ll
@@ -26,7 +26,7 @@ define double @pow_ab_a_reassoc(double %a, double %b) {
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %p = call double @llvm.pow.f64(double %a, double %b)
+ %p = call reassoc double @llvm.pow.f64(double %a, double %b)
%m = fmul reassoc double %p, %a
ret double %m
}
@@ -35,13 +35,13 @@ define double @pow_ab_a_reassoc(double %a, double %b) {
define double @pow_ab_a_reassoc_commute(double %pa, double %b) {
; CHECK-LABEL: @pow_ab_a_reassoc_commute(
-; CHECK-NEXT: [[A:%.*]] = fadd double [[PA:%.*]], 4.200000e+01
+; CHECK-NEXT: [[A:%.*]] = fadd reassoc double [[PA:%.*]], 4.200000e+01
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc double [[B:%.*]], 1.000000e+00
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %a = fadd double %pa, 42.0 ; thwart complexity-based canonicalization
- %p = call double @llvm.pow.f64(double %a, double %b)
+ %a = fadd reassoc double %pa, 42.0 ; thwart complexity-based canonicalization
+ %p = call reassoc double @llvm.pow.f64(double %a, double %b)
%m = fmul reassoc double %a, %p
ret double %m
}
@@ -85,8 +85,8 @@ define double @pow_ab_recip_a_reassoc(double %a, double %b) {
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %r = fdiv double 1.0, %a
- %p = call double @llvm.pow.f64(double %a, double %b)
+ %r = fdiv reassoc double 1.0, %a
+ %p = call reassoc double @llvm.pow.f64(double %a, double %b)
%m = fmul reassoc double %r, %p
ret double %m
}
@@ -99,8 +99,8 @@ define double @pow_ab_recip_a_reassoc_commute(double %a, double %b) {
; CHECK-NEXT: [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[M]]
;
- %r = fdiv double 1.0, %a
- %p = call double @llvm.pow.f64(double %a, double %b)
+ %r = fdiv reassoc double 1.0, %a
+ %p = call reassoc double @llvm.pow.f64(double %a, double %b)
%m = fmul reassoc double %p, %r
ret double %m
}
@@ -126,13 +126,13 @@ define double @pow_ab_recip_a_reassoc_use1(double %a, double %b) {
define double @pow_ab_recip_a_reassoc_use2(double %a, double %b) {
; CHECK-LABEL: @pow_ab_recip_a_reassoc_use2(
-; CHECK-NEXT: [[P:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT: [[P:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
; CHECK-NEXT: [[M:%.*]] = fdiv reassoc double [[P]], [[A]]
; CHECK-NEXT: call void @use(double [[P]])
; CHECK-NEXT: ret double [[M]]
;
- %r = fdiv double 1.0, %a
- %p = call double @llvm.pow.f64(double %a, double %b)
+ %r = fdiv reassoc double 1.0, %a
+ %p = call reassoc double @llvm.pow.f64(double %a, double %b)
%m = fmul reassoc double %r, %p
call void @use(double %p)
ret double %m
@@ -181,8 +181,8 @@ define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B:%.*]])
; CHECK-NEXT: ret double [[MUL]]
;
- %1 = call double @llvm.pow.f64(double %a, double %b)
- %2 = call double @llvm.pow.f64(double %c, double %b)
+ %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+ %2 = call reassoc double @llvm.pow.f64(double %c, double %b)
%mul = fmul reassoc double %2, %1
ret double %mul
}
@@ -191,14 +191,14 @@ define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use1(
-; CHECK-NEXT: [[AB:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT: [[AB:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc double [[A]], [[C:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
; CHECK-NEXT: call void @use(double [[AB]])
; CHECK-NEXT: ret double [[MUL]]
;
- %ab = call double @llvm.pow.f64(double %a, double %b)
- %cb = call double @llvm.pow.f64(double %c, double %b)
+ %ab = call reassoc double @llvm.pow.f64(double %a, double %b)
+ %cb = call reassoc double @llvm.pow.f64(double %c, double %b)
%mul = fmul reassoc double %ab, %cb
call void @use(double %ab)
ret double %mul
@@ -208,14 +208,14 @@ define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
define double @pow_ab_pow_cb_reassoc_use2(double %a, double %b, double %c) {
; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use2(
-; CHECK-NEXT: [[CB:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B:%.*]])
+; CHECK-NEXT: [[CB:%.*]] = call reassoc double @llvm.pow.f64(double [[C:%.*]], double [[B:%.*]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc double [[A:%.*]], [[C]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
; CHECK-NEXT: call void @use(double [[CB]])
; CHECK-NEXT: ret double [[MUL]]
;
- %ab = call double @llvm.pow.f64(double %a, double %b)
- %cb = call double @llvm.pow.f64(double %c, double %b)
+ %ab = call reassoc double @llvm.pow.f64(double %a, double %b)
+ %cb = call reassoc double @llvm.pow.f64(double %c, double %b)
%mul = fmul reassoc double %ab, %cb
call void @use(double %cb)
ret double %mul
@@ -259,8 +259,8 @@ define double @pow_ab_x_pow_ac_reassoc(double %a, double %b, double %c) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %1 = call double @llvm.pow.f64(double %a, double %b)
- %2 = call double @llvm.pow.f64(double %a, double %c)
+ %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+ %2 = call reassoc double @llvm.pow.f64(double %a, double %c)
%mul = fmul reassoc double %2, %1
ret double %mul
}
@@ -271,7 +271,7 @@ define double @pow_ab_reassoc(double %a, double %b) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %1 = call double @llvm.pow.f64(double %a, double %b)
+ %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
%mul = fmul reassoc double %1, %1
ret double %mul
}
@@ -291,14 +291,14 @@ define double @pow_ab_reassoc_extra_use(double %a, double %b) {
define double @pow_ab_x_pow_ac_reassoc_extra_use(double %a, double %b, double %c) {
; CHECK-LABEL: @pow_ab_x_pow_ac_reassoc_extra_use(
-; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT: [[TMP1:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc double [[B]], [[C:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP2]])
; CHECK-NEXT: call void @use(double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %1 = call double @llvm.pow.f64(double %a, double %b)
- %2 = call double @llvm.pow.f64(double %a, double %c)
+ %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+ %2 = call reassoc double @llvm.pow.f64(double %a, double %c)
%mul = fmul reassoc double %1, %2
call void @use(double %1)
ret double %mul
diff --git a/llvm/test/Transforms/InstCombine/fmul-sqrt.ll b/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
index 72ac2f18f113a4c..b19c4d1e07ff028 100644
--- a/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
@@ -45,8 +45,8 @@ define double @sqrt_a_sqrt_b_reassoc_nnan(double %a, double %b) {
; CHECK-NEXT: [[MUL:%.*]] = call reassoc nnan double @llvm.sqrt.f64(double [[TMP1]])
; CHECK-NEXT: ret double [[MUL]]
;
- %1 = call double @llvm.sqrt.f64(double %a)
- %2 = call double @llvm.sqrt.f64(double %b)
+ %1 = call reassoc double @llvm.sqrt.f64(double %a)
+ %2 = call reassoc double @llvm.sqrt.f64(double %b)
%mul = fmul reassoc nnan double %1, %2
ret double %mul
}
@@ -78,10 +78,10 @@ define double @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(double %a, double %b, double
; CHECK-NEXT: [[MUL2:%.*]] = call reassoc nnan ninf double @llvm.sqrt.f64(double [[TMP3]])
; CHECK-NEXT: ret double [[MUL2]]
;
- %1 = call double @llvm.sqrt.f64(double %a)
- %2 = call double @llvm.sqrt.f64(double %b)
- %3 = call double @llvm.sqrt.f64(double %c)
- %4 = call double @llvm.sqrt.f64(double %d)
+ %1 = call reassoc double @llvm.sqrt.f64(double %a)
+ %2 = call reassoc double @llvm.sqrt.f64(double %b)
+ %3 = call reassoc double @llvm.sqrt.f64(double %c)
+ %4 = call reassoc double @llvm.sqrt.f64(double %d)
%mul = fmul reassoc nnan arcp double %1, %2
%mul1 = fmul reassoc nnan double %mul, %3
%mul2 = fmul reassoc nnan ninf double %mul1, %4
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 5e6db8f964eb406..5fbff8e5e29446b 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -641,7 +641,7 @@ define float @log2half_commute(float %x1, float %y) {
;
%x = fdiv float %x1, 7.0 ; thwart complexity-based canonicalization
%halfy = fmul float %y, 0.5
- %log2 = call float @llvm.log2.f32(float %halfy)
+ %log2 = call reassoc float @llvm.log2.f32(float %halfy)
%mul = fmul fast float %x, %log2
ret float %mul
}
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 89efbb6f4536113..466365d0dca06f9 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -149,8 +149,8 @@ define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -163,8 +163,8 @@ define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul fast double %p2, %p1
ret double %mul
}
@@ -192,8 +192,8 @@ define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
%mul = fmul reassoc double %p2, %p1
ret double %mul
}
@@ -201,16 +201,16 @@ entry:
define double @powi_fmul_powi_use_first(double %x, i32 %y, i32 %z) {
; CHECK-LABEL: @powi_fmul_powi_use_first(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
; CHECK-NEXT: ret double [[MUL]]
;
entry:
- %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+ %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
tail call void @use(double %p1)
- %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+ %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
%mul = fmul reassoc double %p1, %p2
ret double %mul
}
@@ -220,8 +220,8 @@ define double @powi_fmul_powi_use_second(double %x, i32 %y, i32 %z) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Z:%.*]])
; CHECK-NEXT: tail call void @use(double [[P1]])
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[Y:%.*]], [[Z]]
-; CHECK-NEXT: [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
+; CHECK-NEXT: [[P2:%.*]] = tail call double @llvm.powi.f64.i32(double [[X]], i32 [[Y:%.*]])
+; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[P2]], [[P1]]
; CHECK-NEXT: ret double [[MUL]]
;
entry:
More information about the llvm-commits
mailing list