[llvm] [InstCombine] Fix the correctness of missing check reassoc attribute (PR #71277)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 9 23:40:47 PST 2023


https://github.com/vfdff updated https://github.com/llvm/llvm-project/pull/71277

>From d6be62c05840857911e0427c66f9d66d6d7a0875 Mon Sep 17 00:00:00 2001
From: zhongyunde 00443407 <zhongyunde at huawei.com>
Date: Mon, 23 Oct 2023 09:19:54 -0400
Subject: [PATCH] [InstCombine] Fix the correctness of missing check reassoc
 flag

The potential issue is based on the discussion of PR69998. The transform is
reasonable only when I and all of its operands have the reassoc flag.
Also add some reassoc to the original test case to retain the original
optimization logic.

NOTE:
IR nodes within a single function may carry different fast-math flags, for
example when the function is assembled from multiple modules during LTO.
---
 llvm/include/llvm/IR/Instruction.h            |   3 +
 llvm/lib/IR/Instruction.cpp                   |  21 ++++
 .../InstCombine/InstCombineMulDivRem.cpp      |   2 +-
 .../Transforms/InstCombine/extractelement.ll  |   2 +-
 llvm/test/Transforms/InstCombine/fast-math.ll |   4 +-
 llvm/test/Transforms/InstCombine/fmul-exp.ll  |  20 ++--
 llvm/test/Transforms/InstCombine/fmul-exp2.ll |  20 ++--
 llvm/test/Transforms/InstCombine/fmul-pow.ll  |  50 ++++----
 llvm/test/Transforms/InstCombine/fmul-sqrt.ll |  18 +--
 llvm/test/Transforms/InstCombine/fmul.ll      | 110 +++++++++---------
 llvm/test/Transforms/InstCombine/powi.ll      |  18 +--
 11 files changed, 146 insertions(+), 122 deletions(-)

diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 58fc32237367d93..e245f5f935805f0 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -452,6 +452,9 @@ class Instruction : public User,
   /// instruction.
   void setNonNeg(bool b = true);
 
+  /// Returns true if every operand that is an FP math instruction has the reassoc flag.
+  bool hasAllowReassocOfAllOperand() const LLVM_READONLY;
+
   /// Determine whether the no unsigned wrap flag is set.
   bool hasNoUnsignedWrap() const LLVM_READONLY;
 
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 27b2c5ee4d399dc..9324b3cf445ba58 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -321,6 +321,27 @@ void Instruction::setNonNeg(bool b) {
                          (b * PossiblyNonNegInst::NonNeg);
 }
 
+bool Instruction::hasAllowReassocOfAllOperand() const {
+  return all_of(operands(), [](Value *V) {
+    if (!isa<FPMathOperator>(V))
+      return true;
+
+    auto *FPOp = cast<FPMathOperator>(V);
+    switch (FPOp->getOpcode()) {
+    case Instruction::FNeg:
+    case Instruction::FAdd:
+    case Instruction::FSub:
+    case Instruction::FMul:
+    case Instruction::FDiv:
+    case Instruction::FRem:
+      return FPOp->hasAllowReassoc();
+
+    default:
+      return true;
+    }
+  });
+}
+
 bool Instruction::hasNoUnsignedWrap() const {
   return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap();
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index db0804380855e3a..d2ac7abc42b9ce6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -781,7 +781,7 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
   if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
     return replaceInstUsesWith(I, V);
 
-  if (I.hasAllowReassoc())
+  if (I.hasAllowReassoc() && I.hasAllowReassocOfAllOperand())
     if (Instruction *FoldedMul = foldFMulReassoc(I))
       return FoldedMul;
 
diff --git a/llvm/test/Transforms/InstCombine/extractelement.ll b/llvm/test/Transforms/InstCombine/extractelement.ll
index 1e8b995a5bc025c..3e1e3e6115925c6 100644
--- a/llvm/test/Transforms/InstCombine/extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/extractelement.ll
@@ -912,7 +912,7 @@ define float @crash_4b8320(<2 x float> %i1, float %i12) {
 ; ANY-NEXT:    [[I29:%.*]] = fadd float [[TMP3]], 0.000000e+00
 ; ANY-NEXT:    ret float [[I29]]
 ;
-  %i5 = fmul <2 x float> zeroinitializer, %i1
+  %i5 = fmul reassoc <2 x float> zeroinitializer, %i1
   %i6 = fmul reassoc <2 x float> zeroinitializer, %i5
   %i147 = extractelement <2 x float> %i6, i64 0
   %i15 = extractelement <2 x float> %i6, i64 0
diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll
index 129d7811cfb8671..f8b930c77086457 100644
--- a/llvm/test/Transforms/InstCombine/fast-math.ll
+++ b/llvm/test/Transforms/InstCombine/fast-math.ll
@@ -562,7 +562,7 @@ define float @fdiv1(float %x) {
 ; CHECK-NEXT:    [[DIV1:%.*]] = fmul fast float [[X:%.*]], 0x3FD7303B60000000
 ; CHECK-NEXT:    ret float [[DIV1]]
 ;
-  %div = fdiv float %x, 0x3FF3333340000000
+  %div = fdiv reassoc float %x, 0x3FF3333340000000
   %div1 = fdiv fast float %div, 0x4002666660000000
   ret float %div1
 ; 0x3FF3333340000000 = 1.2f
@@ -603,7 +603,7 @@ define float @fdiv3(float %x) {
 ; CHECK-NEXT:    [[DIV1:%.*]] = fdiv fast float [[TMP1]], 0x47EFFFFFE0000000
 ; CHECK-NEXT:    ret float [[DIV1]]
 ;
-  %div = fdiv float %x, 0x47EFFFFFE0000000
+  %div = fdiv reassoc float %x, 0x47EFFFFFE0000000
   %div1 = fdiv fast float %div, 0x4002666660000000
   ret float %div1
 }
diff --git a/llvm/test/Transforms/InstCombine/fmul-exp.ll b/llvm/test/Transforms/InstCombine/fmul-exp.ll
index 62d22b8c085c267..16066b5d5bc5168 100644
--- a/llvm/test/Transforms/InstCombine/fmul-exp.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-exp.ll
@@ -21,14 +21,14 @@ define double @exp_a_exp_b(double %a, double %b) {
 ; exp(a) * exp(b) reassoc, multiple uses
 define double @exp_a_exp_b_multiple_uses(double %a, double %b) {
 ; CHECK-LABEL: @exp_a_exp_b_multiple_uses(
-; CHECK-NEXT:    [[T1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[T1:%.*]] = call reassoc double @llvm.exp.f64(double [[B:%.*]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = fadd reassoc double [[A:%.*]], [[B]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
 ; CHECK-NEXT:    call void @use(double [[T1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %t = call double @llvm.exp.f64(double %a)
-  %t1 = call double @llvm.exp.f64(double %b)
+  %t = call reassoc double @llvm.exp.f64(double %a)
+  %t1 = call reassoc double @llvm.exp.f64(double %b)
   %mul = fmul reassoc double %t, %t1
   call void @use(double %t1)
   ret double %mul
@@ -59,8 +59,8 @@ define double @exp_a_exp_b_reassoc(double %a, double %b) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %t = call double @llvm.exp.f64(double %a)
-  %t1 = call double @llvm.exp.f64(double %b)
+  %t = call reassoc double @llvm.exp.f64(double %a)
+  %t1 = call reassoc double @llvm.exp.f64(double %b)
   %mul = fmul reassoc double %t, %t1
   ret double %mul
 }
@@ -71,7 +71,7 @@ define double @exp_a_a(double %a) {
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %t = call double @llvm.exp.f64(double %a)
+  %t = call reassoc double @llvm.exp.f64(double %a)
   %m = fmul reassoc double %t, %t
   ret double %m
 }
@@ -100,12 +100,12 @@ define double @exp_a_exp_b_exp_c_exp_d_fast(double %a, double %b, double %c, dou
 ; CHECK-NEXT:    [[MUL2:%.*]] = call reassoc double @llvm.exp.f64(double [[TMP3]])
 ; CHECK-NEXT:    ret double [[MUL2]]
 ;
-  %t = call double @llvm.exp.f64(double %a)
-  %t1 = call double @llvm.exp.f64(double %b)
+  %t = call reassoc double @llvm.exp.f64(double %a)
+  %t1 = call reassoc double @llvm.exp.f64(double %b)
   %mul = fmul reassoc double %t, %t1
-  %t2 = call double @llvm.exp.f64(double %c)
+  %t2 = call reassoc double @llvm.exp.f64(double %c)
   %mul1 = fmul reassoc double %mul, %t2
-  %t3 = call double @llvm.exp.f64(double %d)
+  %t3 = call reassoc double @llvm.exp.f64(double %d)
   %mul2 = fmul reassoc double %mul1, %t3
   ret double %mul2
 }
diff --git a/llvm/test/Transforms/InstCombine/fmul-exp2.ll b/llvm/test/Transforms/InstCombine/fmul-exp2.ll
index 35756cc043518f3..80cd13163b800aa 100644
--- a/llvm/test/Transforms/InstCombine/fmul-exp2.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-exp2.ll
@@ -21,14 +21,14 @@ define double @exp2_a_exp2_b(double %a, double %b) {
 ; exp2(a) * exp2(b) reassoc, multiple uses
 define double @exp2_a_exp2_b_multiple_uses(double %a, double %b) {
 ; CHECK-LABEL: @exp2_a_exp2_b_multiple_uses(
-; CHECK-NEXT:    [[T1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]])
+; CHECK-NEXT:    [[T1:%.*]] = call reassoc double @llvm.exp2.f64(double [[B:%.*]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = fadd reassoc double [[A:%.*]], [[B]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
 ; CHECK-NEXT:    call void @use(double [[T1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %t = call double @llvm.exp2.f64(double %a)
-  %t1 = call double @llvm.exp2.f64(double %b)
+  %t = call reassoc double @llvm.exp2.f64(double %a)
+  %t1 = call reassoc double @llvm.exp2.f64(double %b)
   %mul = fmul reassoc double %t, %t1
   call void @use(double %t1)
   ret double %mul
@@ -40,7 +40,7 @@ define double @exp2_a_a(double %a) {
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %t = call double @llvm.exp2.f64(double %a)
+  %t = call reassoc double @llvm.exp2.f64(double %a)
   %m = fmul reassoc double %t, %t
   ret double %m
 }
@@ -70,8 +70,8 @@ define double @exp2_a_exp2_b_reassoc(double %a, double %b) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %t = call double @llvm.exp2.f64(double %a)
-  %t1 = call double @llvm.exp2.f64(double %b)
+  %t = call reassoc double @llvm.exp2.f64(double %a)
+  %t1 = call reassoc double @llvm.exp2.f64(double %b)
   %mul = fmul reassoc double %t, %t1
   ret double %mul
 }
@@ -85,12 +85,12 @@ define double @exp2_a_exp2_b_exp2_c_exp2_d(double %a, double %b, double %c, doub
 ; CHECK-NEXT:    [[MUL2:%.*]] = call reassoc double @llvm.exp2.f64(double [[TMP3]])
 ; CHECK-NEXT:    ret double [[MUL2]]
 ;
-  %t = call double @llvm.exp2.f64(double %a)
-  %t1 = call double @llvm.exp2.f64(double %b)
+  %t = call reassoc double @llvm.exp2.f64(double %a)
+  %t1 = call reassoc double @llvm.exp2.f64(double %b)
   %mul = fmul reassoc double %t, %t1
-  %t2 = call double @llvm.exp2.f64(double %c)
+  %t2 = call reassoc double @llvm.exp2.f64(double %c)
   %mul1 = fmul reassoc double %mul, %t2
-  %t3 = call double @llvm.exp2.f64(double %d)
+  %t3 = call reassoc double @llvm.exp2.f64(double %d)
   %mul2 = fmul reassoc double %mul1, %t3
   ret double %mul2
 }
diff --git a/llvm/test/Transforms/InstCombine/fmul-pow.ll b/llvm/test/Transforms/InstCombine/fmul-pow.ll
index 63458e136074c90..1d007b174f257b9 100644
--- a/llvm/test/Transforms/InstCombine/fmul-pow.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-pow.ll
@@ -26,7 +26,7 @@ define double @pow_ab_a_reassoc(double %a, double %b)  {
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %p = call double @llvm.pow.f64(double %a, double %b)
+  %p = call reassoc double @llvm.pow.f64(double %a, double %b)
   %m = fmul reassoc double %p, %a
   ret double %m
 }
@@ -35,13 +35,13 @@ define double @pow_ab_a_reassoc(double %a, double %b)  {
 
 define double @pow_ab_a_reassoc_commute(double %pa, double %b)  {
 ; CHECK-LABEL: @pow_ab_a_reassoc_commute(
-; CHECK-NEXT:    [[A:%.*]] = fadd double [[PA:%.*]], 4.200000e+01
+; CHECK-NEXT:    [[A:%.*]] = fadd reassoc double [[PA:%.*]], 4.200000e+01
 ; CHECK-NEXT:    [[TMP1:%.*]] = fadd reassoc double [[B:%.*]], 1.000000e+00
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %a = fadd double %pa, 42.0 ; thwart complexity-based canonicalization
-  %p = call double @llvm.pow.f64(double %a, double %b)
+  %a = fadd reassoc double %pa, 42.0 ; thwart complexity-based canonicalization
+  %p = call reassoc double @llvm.pow.f64(double %a, double %b)
   %m = fmul reassoc double %a, %p
   ret double %m
 }
@@ -85,8 +85,8 @@ define double @pow_ab_recip_a_reassoc(double %a, double %b)  {
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %r = fdiv double 1.0, %a
-  %p = call double @llvm.pow.f64(double %a, double %b)
+  %r = fdiv reassoc double 1.0, %a
+  %p = call reassoc double @llvm.pow.f64(double %a, double %b)
   %m = fmul reassoc double %r, %p
   ret double %m
 }
@@ -99,8 +99,8 @@ define double @pow_ab_recip_a_reassoc_commute(double %a, double %b)  {
 ; CHECK-NEXT:    [[M:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %r = fdiv double 1.0, %a
-  %p = call double @llvm.pow.f64(double %a, double %b)
+  %r = fdiv reassoc double 1.0, %a
+  %p = call reassoc double @llvm.pow.f64(double %a, double %b)
   %m = fmul reassoc double %p, %r
   ret double %m
 }
@@ -126,13 +126,13 @@ define double @pow_ab_recip_a_reassoc_use1(double %a, double %b)  {
 
 define double @pow_ab_recip_a_reassoc_use2(double %a, double %b)  {
 ; CHECK-LABEL: @pow_ab_recip_a_reassoc_use2(
-; CHECK-NEXT:    [[P:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT:    [[P:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
 ; CHECK-NEXT:    [[M:%.*]] = fdiv reassoc double [[P]], [[A]]
 ; CHECK-NEXT:    call void @use(double [[P]])
 ; CHECK-NEXT:    ret double [[M]]
 ;
-  %r = fdiv double 1.0, %a
-  %p = call double @llvm.pow.f64(double %a, double %b)
+  %r = fdiv reassoc double 1.0, %a
+  %p = call reassoc double @llvm.pow.f64(double %a, double %b)
   %m = fmul reassoc double %r, %p
   call void @use(double %p)
   ret double %m
@@ -181,8 +181,8 @@ define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B:%.*]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %1 = call double @llvm.pow.f64(double %a, double %b)
-  %2 = call double @llvm.pow.f64(double %c, double %b)
+  %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+  %2 = call reassoc double @llvm.pow.f64(double %c, double %b)
   %mul = fmul reassoc double %2, %1
   ret double %mul
 }
@@ -191,14 +191,14 @@ define double @pow_ab_pow_cb_reassoc(double %a, double %b, double %c) {
 
 define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use1(
-; CHECK-NEXT:    [[AB:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT:    [[AB:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[A]], [[C:%.*]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
 ; CHECK-NEXT:    call void @use(double [[AB]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %ab = call double @llvm.pow.f64(double %a, double %b)
-  %cb = call double @llvm.pow.f64(double %c, double %b)
+  %ab = call reassoc double @llvm.pow.f64(double %a, double %b)
+  %cb = call reassoc double @llvm.pow.f64(double %c, double %b)
   %mul = fmul reassoc double %ab, %cb
   call void @use(double %ab)
   ret double %mul
@@ -208,14 +208,14 @@ define double @pow_ab_pow_cb_reassoc_use1(double %a, double %b, double %c) {
 
 define double @pow_ab_pow_cb_reassoc_use2(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_pow_cb_reassoc_use2(
-; CHECK-NEXT:    [[CB:%.*]] = call double @llvm.pow.f64(double [[C:%.*]], double [[B:%.*]])
+; CHECK-NEXT:    [[CB:%.*]] = call reassoc double @llvm.pow.f64(double [[C:%.*]], double [[B:%.*]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc double [[A:%.*]], [[C]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[TMP1]], double [[B]])
 ; CHECK-NEXT:    call void @use(double [[CB]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %ab = call double @llvm.pow.f64(double %a, double %b)
-  %cb = call double @llvm.pow.f64(double %c, double %b)
+  %ab = call reassoc double @llvm.pow.f64(double %a, double %b)
+  %cb = call reassoc double @llvm.pow.f64(double %c, double %b)
   %mul = fmul reassoc double %ab, %cb
   call void @use(double %cb)
   ret double %mul
@@ -259,8 +259,8 @@ define double @pow_ab_x_pow_ac_reassoc(double %a, double %b, double %c) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %1 = call double @llvm.pow.f64(double %a, double %b)
-  %2 = call double @llvm.pow.f64(double %a, double %c)
+  %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+  %2 = call reassoc double @llvm.pow.f64(double %a, double %c)
   %mul = fmul reassoc double %2, %1
   ret double %mul
 }
@@ -271,7 +271,7 @@ define double @pow_ab_reassoc(double %a, double %b) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %1 = call double @llvm.pow.f64(double %a, double %b)
+  %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
   %mul = fmul reassoc double %1, %1
   ret double %mul
 }
@@ -291,14 +291,14 @@ define double @pow_ab_reassoc_extra_use(double %a, double %b) {
 
 define double @pow_ab_x_pow_ac_reassoc_extra_use(double %a, double %b, double %c) {
 ; CHECK-LABEL: @pow_ab_x_pow_ac_reassoc_extra_use(
-; CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
+; CHECK-NEXT:    [[TMP1:%.*]] = call reassoc double @llvm.pow.f64(double [[A:%.*]], double [[B:%.*]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = fadd reassoc double [[B]], [[C:%.*]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.pow.f64(double [[A]], double [[TMP2]])
 ; CHECK-NEXT:    call void @use(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %1 = call double @llvm.pow.f64(double %a, double %b)
-  %2 = call double @llvm.pow.f64(double %a, double %c)
+  %1 = call reassoc double @llvm.pow.f64(double %a, double %b)
+  %2 = call reassoc double @llvm.pow.f64(double %a, double %c)
   %mul = fmul reassoc double %1, %2
   call void @use(double %1)
   ret double %mul
diff --git a/llvm/test/Transforms/InstCombine/fmul-sqrt.ll b/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
index 72ac2f18f113a4c..e4cfc339ffe2d83 100644
--- a/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
+++ b/llvm/test/Transforms/InstCombine/fmul-sqrt.ll
@@ -45,8 +45,8 @@ define double @sqrt_a_sqrt_b_reassoc_nnan(double %a, double %b) {
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc nnan double @llvm.sqrt.f64(double [[TMP1]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %1 = call double @llvm.sqrt.f64(double %a)
-  %2 = call double @llvm.sqrt.f64(double %b)
+  %1 = call reassoc double @llvm.sqrt.f64(double %a)
+  %2 = call reassoc double @llvm.sqrt.f64(double %b)
   %mul = fmul reassoc nnan double %1, %2
   ret double %mul
 }
@@ -78,10 +78,10 @@ define double @sqrt_a_sqrt_b_sqrt_c_sqrt_d_reassoc(double %a, double %b, double
 ; CHECK-NEXT:    [[MUL2:%.*]] = call reassoc nnan ninf double @llvm.sqrt.f64(double [[TMP3]])
 ; CHECK-NEXT:    ret double [[MUL2]]
 ;
-  %1 = call double @llvm.sqrt.f64(double %a)
-  %2 = call double @llvm.sqrt.f64(double %b)
-  %3 = call double @llvm.sqrt.f64(double %c)
-  %4 = call double @llvm.sqrt.f64(double %d)
+  %1 = call reassoc double @llvm.sqrt.f64(double %a)
+  %2 = call reassoc double @llvm.sqrt.f64(double %b)
+  %3 = call reassoc double @llvm.sqrt.f64(double %c)
+  %4 = call reassoc double @llvm.sqrt.f64(double %d)
   %mul = fmul reassoc nnan arcp double %1, %2
   %mul1 = fmul reassoc nnan double %mul, %3
   %mul2 = fmul reassoc nnan ninf double %mul1, %4
@@ -102,13 +102,13 @@ define double @rsqrt_squared(double %x) {
 define double @rsqrt_x_reassociate_extra_use(double %x, ptr %p) {
 ; CHECK-LABEL: @rsqrt_x_reassociate_extra_use(
 ; CHECK-NEXT:    [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[X:%.*]])
-; CHECK-NEXT:    [[RSQRT:%.*]] = fdiv double 1.000000e+00, [[SQRT]]
+; CHECK-NEXT:    [[RSQRT:%.*]] = fdiv reassoc double 1.000000e+00, [[SQRT]]
 ; CHECK-NEXT:    [[RES:%.*]] = fdiv reassoc nsz double [[X]], [[SQRT]]
 ; CHECK-NEXT:    store double [[RSQRT]], ptr [[P:%.*]], align 8
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %sqrt = call double @llvm.sqrt.f64(double %x)
-  %rsqrt = fdiv double 1.0, %sqrt
+  %rsqrt = fdiv reassoc double 1.0, %sqrt
   %res = fmul reassoc nsz double %rsqrt, %x
   store double %rsqrt, ptr %p
   ret double %res
@@ -138,7 +138,7 @@ define double @sqrt_divisor_squared(double %x, double %y) {
 ; CHECK-NEXT:    ret double [[SQUARED]]
 ;
   %sqrt = call double @llvm.sqrt.f64(double %x)
-  %div = fdiv double %y, %sqrt
+  %div = fdiv reassoc double %y, %sqrt
   %squared = fmul reassoc nnan nsz double %div, %div
   ret double %squared
 }
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 8ecbb85018428dd..dc6c2e406596b14 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -542,8 +542,8 @@ define float @fabs_fabs_extra_use3(float %x, float %y) {
 }
 
 ; (X*Y) * X => (X*X) * Y
-; The transform only requires 'reassoc', but test other FMF in
-; the commuted variants to make sure FMF propagates as expected.
+; The transform only requires 'reassoc'; make sure FMF propagates as expected
+; when all of its operands also carry the 'reassoc' flag.
 
 define float @reassoc_common_operand1(float %x, float %y) {
 ; CHECK-LABEL: @reassoc_common_operand1(
@@ -551,7 +551,7 @@ define float @reassoc_common_operand1(float %x, float %y) {
 ; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc float [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret float [[MUL2]]
 ;
-  %mul1 = fmul float %x, %y
+  %mul1 = fmul reassoc float %x, %y
   %mul2 = fmul reassoc float %mul1, %x
   ret float %mul2
 }
@@ -564,7 +564,7 @@ define float @reassoc_common_operand2(float %x, float %y) {
 ; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast float [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret float [[MUL2]]
 ;
-  %mul1 = fmul float %y, %x
+  %mul1 = fmul reassoc float %y, %x
   %mul2 = fmul fast float %mul1, %x
   ret float %mul2
 }
@@ -573,13 +573,13 @@ define float @reassoc_common_operand2(float %x, float %y) {
 
 define float @reassoc_common_operand3(float %x1, float %y) {
 ; CHECK-LABEL: @reassoc_common_operand3(
-; CHECK-NEXT:    [[X:%.*]] = fdiv float [[X1:%.*]], 3.000000e+00
+; CHECK-NEXT:    [[X:%.*]] = fdiv reassoc float [[X1:%.*]], 3.000000e+00
 ; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan float [[X]], [[X]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc nnan float [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret float [[MUL2]]
 ;
-  %x = fdiv float %x1, 3.0 ; thwart complexity-based canonicalization
-  %mul1 = fmul float %x, %y
+  %x = fdiv reassoc float %x1, 3.0 ; thwart complexity-based canonicalization
+  %mul1 = fmul reassoc float %x, %y
   %mul2 = fmul reassoc nnan float %x, %mul1
   ret float %mul2
 }
@@ -588,13 +588,13 @@ define float @reassoc_common_operand3(float %x1, float %y) {
 
 define float @reassoc_common_operand4(float %x1, float %y) {
 ; CHECK-LABEL: @reassoc_common_operand4(
-; CHECK-NEXT:    [[X:%.*]] = fdiv float [[X1:%.*]], 3.000000e+00
+; CHECK-NEXT:    [[X:%.*]] = fdiv reassoc float [[X1:%.*]], 3.000000e+00
 ; CHECK-NEXT:    [[TMP1:%.*]] = fmul reassoc ninf float [[X]], [[X]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = fmul reassoc ninf float [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret float [[MUL2]]
 ;
-  %x = fdiv float %x1, 3.0 ; thwart complexity-based canonicalization
-  %mul1 = fmul float %y, %x
+  %x = fdiv reassoc float %x1, 3.0 ; thwart complexity-based canonicalization
+  %mul1 = fmul reassoc float %y, %x
   %mul2 = fmul reassoc ninf float %x, %mul1
   ret float %mul2
 }
@@ -603,12 +603,12 @@ define float @reassoc_common_operand4(float %x1, float %y) {
 
 define float @reassoc_common_operand_multi_use(float %x, float %y) {
 ; CHECK-LABEL: @reassoc_common_operand_multi_use(
-; CHECK-NEXT:    [[MUL1:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL1:%.*]] = fmul reassoc float [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    [[MUL2:%.*]] = fmul fast float [[MUL1]], [[X]]
 ; CHECK-NEXT:    call void @use_f32(float [[MUL1]])
 ; CHECK-NEXT:    ret float [[MUL2]]
 ;
-  %mul1 = fmul float %x, %y
+  %mul1 = fmul reassoc float %x, %y
   %mul2 = fmul fast float %mul1, %x
   call void @use_f32(float %mul1)
   ret float %mul2
@@ -639,9 +639,9 @@ define float @log2half_commute(float %x1, float %y) {
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP3]], 0x3FC24924A0000000
 ; CHECK-NEXT:    ret float [[MUL]]
 ;
-  %x = fdiv float %x1, 7.0 ; thwart complexity-based canonicalization
+  %x = fdiv reassoc float %x1, 7.0 ; thwart complexity-based canonicalization
   %halfy = fmul float %y, 0.5
-  %log2 = call float @llvm.log2.f32(float %halfy)
+  %log2 = call reassoc float @llvm.log2.f32(float %halfy)
   %mul = fmul fast float %x, %log2
   ret float %mul
 }
@@ -653,7 +653,7 @@ define float @fdiv_constant_numerator_fmul(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fdiv reassoc float 1.200000e+07, [[X:%.*]]
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t1 = fdiv float 2.0e+3, %x
+  %t1 = fdiv reassoc float 2.0e+3, %x
   %t3 = fmul reassoc float %t1, 6.0e+3
   ret float %t3
 }
@@ -682,7 +682,7 @@ define float @fdiv_constant_denominator_fmul(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fmul reassoc float [[X:%.*]], 3.000000e+00
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t1 = fdiv float %x, 2.0e+3
+  %t1 = fdiv reassoc float %x, 2.0e+3
   %t3 = fmul reassoc float %t1, 6.0e+3
   ret float %t3
 }
@@ -692,7 +692,7 @@ define <4 x float> @fdiv_constant_denominator_fmul_vec(<4 x float> %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fmul reassoc <4 x float> [[X:%.*]], <float 3.000000e+00, float 2.000000e+00, float 1.000000e+00, float 1.000000e+00>
 ; CHECK-NEXT:    ret <4 x float> [[T3]]
 ;
-  %t1 = fdiv <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
+  %t1 = fdiv reassoc <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
   %t3 = fmul reassoc <4 x float> %t1, <float 6.0e+3, float 6.0e+3, float 2.0e+3, float 1.0e+3>
   ret <4 x float> %t3
 }
@@ -705,7 +705,7 @@ define <4 x float> @fdiv_constant_denominator_fmul_vec_constexpr(<4 x float> %x)
 ; CHECK-NEXT:    ret <4 x float> [[T3]]
 ;
   %constExprMul = bitcast i128 trunc (i160 bitcast (<5 x float> <float 6.0e+3, float 6.0e+3, float 2.0e+3, float 1.0e+3, float undef> to i160) to i128) to <4 x float>
-  %t1 = fdiv <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
+  %t1 = fdiv reassoc <4 x float> %x, <float 2.0e+3, float 3.0e+3, float 2.0e+3, float 1.0e+3>
   %t3 = fmul reassoc <4 x float> %t1, %constExprMul
   ret <4 x float> %t3
 }
@@ -734,7 +734,7 @@ define float @fdiv_constant_denominator_fmul_denorm(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[X:%.*]], 0x3760620000000000
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t1 = fdiv float %x, 2.0e+3
+  %t1 = fdiv reassoc float %x, 2.0e+3
   %t3 = fmul fast float %t1, 0x3810000000000000
   ret float %t3
 }
@@ -748,7 +748,7 @@ define float @fdiv_constant_denominator_fmul_denorm_try_harder(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fdiv reassoc float [[X:%.*]], 0x47E8000000000000
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t1 = fdiv float %x, 3.0
+  %t1 = fdiv reassoc float %x, 3.0
   %t3 = fmul reassoc float %t1, 0x3810000000000000
   ret float %t3
 }
@@ -757,12 +757,12 @@ define float @fdiv_constant_denominator_fmul_denorm_try_harder(float %x) {
 
 define float @fdiv_constant_denominator_fmul_denorm_try_harder_extra_use(float %x) {
 ; CHECK-LABEL: @fdiv_constant_denominator_fmul_denorm_try_harder_extra_use(
-; CHECK-NEXT:    [[T1:%.*]] = fdiv float [[X:%.*]], 3.000000e+00
+; CHECK-NEXT:    [[T1:%.*]] = fdiv reassoc float [[X:%.*]], 3.000000e+00
 ; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T1]], 0x3810000000000000
 ; CHECK-NEXT:    [[R:%.*]] = fadd float [[T1]], [[T3]]
 ; CHECK-NEXT:    ret float [[R]]
 ;
-  %t1 = fdiv float %x, 3.0e+0
+  %t1 = fdiv reassoc float %x, 3.0e+0
   %t3 = fmul fast float %t1, 0x3810000000000000
   %r = fadd float %t1, %t3
   ret float %r
@@ -776,7 +776,7 @@ define float @fmul_fadd_distribute(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc float [[TMP1]], 6.000000e+00
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t2 = fadd float %x, 2.0
+  %t2 = fadd reassoc float %x, 2.0
   %t3 = fmul reassoc float %t2, 3.0
   ret float %t3
 }
@@ -787,7 +787,7 @@ define <2 x float> @fmul_fadd_distribute_vec(<2 x float> %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc <2 x float> [[TMP1]], <float 1.200000e+07, float 1.200000e+07>
 ; CHECK-NEXT:    ret <2 x float> [[T3]]
 ;
-  %t1 = fadd <2 x float> <float 2.0e+3, float 2.0e+3>, %x
+  %t1 = fadd reassoc <2 x float> <float 2.0e+3, float 2.0e+3>, %x
   %t3 = fmul reassoc <2 x float> %t1, <float 6.0e+3, float 6.0e+3>
   ret <2 x float> %t3
 }
@@ -798,7 +798,7 @@ define <vscale x 2 x float> @fmul_fadd_distribute_scalablevec(<vscale x 2 x floa
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc <vscale x 2 x float> [[TMP1]], shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> poison, float 1.200000e+07, i64 0), <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    ret <vscale x 2 x float> [[T3]]
 ;
-  %t1 = fadd <vscale x 2 x float> shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> undef, float 2.0e+3, i32 0), <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer), %x
+  %t1 = fadd reassoc <vscale x 2 x float> shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> undef, float 2.0e+3, i32 0), <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer), %x
   %t3 = fmul reassoc <vscale x 2 x float> %t1, shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> undef, float 6.0e+3, i32 0), <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer)
 
 
@@ -813,7 +813,7 @@ define float @fmul_fsub_distribute1(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc float [[TMP1]], -6.000000e+00
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t2 = fsub float %x, 2.0
+  %t2 = fsub reassoc float %x, 2.0
   %t3 = fmul reassoc float %t2, 3.0
   ret float %t3
 }
@@ -826,7 +826,7 @@ define float @fmul_fsub_distribute2(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fsub reassoc float 6.000000e+00, [[TMP1]]
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t2 = fsub float 2.0, %x
+  %t2 = fsub reassoc float 2.0, %x
   %t3 = fmul reassoc float %t2, 3.0
   ret float %t3
 }
@@ -841,7 +841,7 @@ define float @fmul_fadd_fmul_distribute(float %x) {
 ; CHECK-NEXT:    ret float [[T3]]
 ;
   %t1 = fmul float %x, 6.0
-  %t2 = fadd float %t1, 2.0
+  %t2 = fadd reassoc float %t1, 2.0
   %t3 = fmul fast float %t2, 5.0
   ret float %t3
 }
@@ -849,13 +849,13 @@ define float @fmul_fadd_fmul_distribute(float %x) {
 define float @fmul_fadd_distribute_extra_use(float %x) {
 ; CHECK-LABEL: @fmul_fadd_distribute_extra_use(
 ; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fadd float [[T1]], 2.000000e+00
+; CHECK-NEXT:    [[T2:%.*]] = fadd reassoc float [[T1]], 2.000000e+00
 ; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
 ; CHECK-NEXT:    call void @use_f32(float [[T2]])
 ; CHECK-NEXT:    ret float [[T3]]
 ;
   %t1 = fmul float %x, 6.0
-  %t2 = fadd float %t1, 2.0
+  %t2 = fadd reassoc float %t1, 2.0
   %t3 = fmul fast float %t2, 5.0
   call void @use_f32(float %t2)
   ret float %t3
@@ -872,8 +872,8 @@ define double @fmul_fadd_fdiv_distribute2(double %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc double [[TMP1]], 0x34000000000000
 ; CHECK-NEXT:    ret double [[T3]]
 ;
-  %t1 = fdiv double %x, 3.0
-  %t2 = fadd double %t1, 5.0
+  %t1 = fdiv reassoc double %x, 3.0
+  %t2 = fadd reassoc double %t1, 5.0
   %t3 = fmul reassoc double %t2, 0x10000000000000
   ret double %t3
 }
@@ -887,8 +887,8 @@ define double @fmul_fadd_fdiv_distribute3(double %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fadd reassoc double [[TMP1]], 0x34000000000000
 ; CHECK-NEXT:    ret double [[T3]]
 ;
-  %t1 = fdiv double %x, 3.0
-  %t2 = fadd double %t1, 5.0
+  %t1 = fdiv reassoc double %x, 3.0
+  %t2 = fadd reassoc double %t1, 5.0
   %t3 = fmul reassoc double %t2, 0x10000000000000
   ret double %t3
 }
@@ -902,8 +902,8 @@ define float @fmul_fsub_fmul_distribute(float %x) {
 ; CHECK-NEXT:    [[T3:%.*]] = fsub fast float 1.000000e+01, [[TMP1]]
 ; CHECK-NEXT:    ret float [[T3]]
 ;
-  %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
+  %t1 = fmul reassoc float %x, 6.0
+  %t2 = fsub reassoc float 2.0, %t1
   %t3 = fmul fast float %t2, 5.0
   ret float %t3
 }
@@ -911,13 +911,13 @@ define float @fmul_fsub_fmul_distribute(float %x) {
 define float @fmul_fsub_fmul_distribute_extra_use(float %x) {
 ; CHECK-LABEL: @fmul_fsub_fmul_distribute_extra_use(
 ; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fsub float 2.000000e+00, [[T1]]
+; CHECK-NEXT:    [[T2:%.*]] = fsub reassoc float 2.000000e+00, [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
 ; CHECK-NEXT:    call void @use_f32(float [[T2]])
 ; CHECK-NEXT:    ret float [[T3]]
 ;
   %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
+  %t2 = fsub reassoc float 2.0, %t1
   %t3 = fmul fast float %t2, 5.0
   call void @use_f32(float %t2)
   ret float %t3
@@ -933,7 +933,7 @@ define float @fmul_fsub_fmul_distribute2(float %x) {
 ; CHECK-NEXT:    ret float [[T3]]
 ;
   %t1 = fmul float %x, 6.0
-  %t2 = fsub float %t1, 2.0
+  %t2 = fsub reassoc float %t1, 2.0
   %t3 = fmul fast float %t2, 5.0
   ret float %t3
 }
@@ -941,13 +941,13 @@ define float @fmul_fsub_fmul_distribute2(float %x) {
 define float @fmul_fsub_fmul_distribute2_extra_use(float %x) {
 ; CHECK-LABEL: @fmul_fsub_fmul_distribute2_extra_use(
 ; CHECK-NEXT:    [[T1:%.*]] = fmul float [[X:%.*]], 6.000000e+00
-; CHECK-NEXT:    [[T2:%.*]] = fsub float 2.000000e+00, [[T1]]
+; CHECK-NEXT:    [[T2:%.*]] = fsub reassoc float 2.000000e+00, [[T1]]
 ; CHECK-NEXT:    [[T3:%.*]] = fmul fast float [[T2]], 5.000000e+00
 ; CHECK-NEXT:    call void @use_f32(float [[T2]])
 ; CHECK-NEXT:    ret float [[T3]]
 ;
   %t1 = fmul float %x, 6.0
-  %t2 = fsub float 2.0, %t1
+  %t2 = fsub reassoc float 2.0, %t1
   %t3 = fmul fast float %t2, 5.0
   call void @use_f32(float %t2)
   ret float %t3
@@ -957,14 +957,14 @@ define float @fmul_fsub_fmul_distribute2_extra_use(float %x) {
 
 define float @common_factor(float %x, float %y) {
 ; CHECK-LABEL: @common_factor(
-; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc float [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT:    [[MUL1:%.*]] = fmul fast float [[MUL]], [[X]]
-; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[MUL1]], [[MUL]]
+; CHECK-NEXT:    [[ADD:%.*]] = fadd reassoc float [[MUL1]], [[MUL]]
 ; CHECK-NEXT:    ret float [[ADD]]
 ;
-  %mul = fmul float %x, %y
+  %mul = fmul reassoc float %x, %y
   %mul1 = fmul fast float %mul, %x
-  %add = fadd float %mul1, %mul
+  %add = fadd reassoc float %mul1, %mul
   ret float %add
 }
 
@@ -986,8 +986,8 @@ define double @fmul_fdivs_factor_common_denominator(double %x, double %y, double
 ; CHECK-NEXT:    [[MUL:%.*]] = fdiv fast double [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %div1 = fdiv double %x, %z
-  %div2 = fdiv double %y, %z
+  %div1 = fdiv reassoc double %x, %z
+  %div2 = fdiv reassoc double %y, %z
   %mul = fmul fast double %div1, %div2
   ret double %mul
 }
@@ -999,8 +999,8 @@ define double @fmul_fdivs_factor(double %x, double %y, double %z, double %w) {
 ; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %div1 = fdiv double %x, %y
-  %div2 = fdiv double %z, %w
+  %div1 = fdiv reassoc double %x, %y
+  %div2 = fdiv reassoc double %z, %w
   %mul = fmul reassoc double %div1, %div2
   ret double %mul
 }
@@ -1011,7 +1011,7 @@ define double @fmul_fdiv_factor(double %x, double %y, double %z) {
 ; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %div = fdiv double %x, %y
+  %div = fdiv reassoc double %x, %y
   %mul = fmul reassoc double %div, %z
   ret double %mul
 }
@@ -1022,7 +1022,7 @@ define double @fmul_fdiv_factor_constant1(double %x, double %y) {
 ; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc double [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
-  %div = fdiv double %x, %y
+  %div = fdiv reassoc double %x, %y
   %mul = fmul reassoc double %div, 42.0
   ret double %mul
 }
@@ -1033,19 +1033,19 @@ define <2 x float> @fmul_fdiv_factor_constant2(<2 x float> %x, <2 x float> %y) {
 ; CHECK-NEXT:    [[MUL:%.*]] = fdiv reassoc <2 x float> [[TMP1]], <float 4.200000e+01, float 1.200000e+01>
 ; CHECK-NEXT:    ret <2 x float> [[MUL]]
 ;
-  %div = fdiv <2 x float> %x, <float 42.0, float 12.0>
+  %div = fdiv reassoc <2 x float> %x, <float 42.0, float 12.0>
   %mul = fmul reassoc <2 x float> %div, %y
   ret <2 x float> %mul
 }
 
 define float @fmul_fdiv_factor_extra_use(float %x, float %y) {
 ; CHECK-LABEL: @fmul_fdiv_factor_extra_use(
-; CHECK-NEXT:    [[DIV:%.*]] = fdiv float [[X:%.*]], 4.200000e+01
+; CHECK-NEXT:    [[DIV:%.*]] = fdiv reassoc float [[X:%.*]], 4.200000e+01
 ; CHECK-NEXT:    call void @use_f32(float [[DIV]])
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul reassoc float [[DIV]], [[Y:%.*]]
 ; CHECK-NEXT:    ret float [[MUL]]
 ;
-  %div = fdiv float %x, 42.0
+  %div = fdiv reassoc float %x, 42.0
   call void @use_f32(float %div)
   %mul = fmul reassoc float %div, %y
   ret float %mul
@@ -1069,7 +1069,7 @@ define void @fmul_loop_invariant_fdiv(float* %a, float %x) {
 ; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
 ;
 entry:
-  %d = fdiv fast float 1.0, %x
+  %d = fdiv reassoc fast float 1.0, %x
   br label %for.body
 
 for.cond.cleanup:
diff --git a/llvm/test/Transforms/InstCombine/powi.ll b/llvm/test/Transforms/InstCombine/powi.ll
index 89efbb6f4536113..afe6203937e3514 100644
--- a/llvm/test/Transforms/InstCombine/powi.ll
+++ b/llvm/test/Transforms/InstCombine/powi.ll
@@ -149,8 +149,8 @@ define double @powi_fmul_powi(double %x, i32 %y, i32 %z) {
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
 entry:
-  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
-  %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
   %mul = fmul reassoc double %p2, %p1
   ret double %mul
 }
@@ -163,8 +163,8 @@ define double @powi_fmul_powi_fast_on_fmul(double %x, i32 %y, i32 %z) {
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
 entry:
-  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
-  %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
   %mul = fmul fast double %p2, %p1
   ret double %mul
 }
@@ -192,8 +192,8 @@ define double @powi_fmul_powi_same_power(double %x, i32 %y, i32 %z) {
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
 entry:
-  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
-  %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
+  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
   %mul = fmul reassoc double %p2, %p1
   ret double %mul
 }
@@ -201,16 +201,16 @@ entry:
 define double @powi_fmul_powi_use_first(double %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @powi_fmul_powi_use_first(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = tail call double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
+; CHECK-NEXT:    [[P1:%.*]] = tail call reassoc double @llvm.powi.f64.i32(double [[X:%.*]], i32 [[Y:%.*]])
 ; CHECK-NEXT:    tail call void @use(double [[P1]])
 ; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[Y]], [[Z:%.*]]
 ; CHECK-NEXT:    [[MUL:%.*]] = call reassoc double @llvm.powi.f64.i32(double [[X]], i32 [[TMP0]])
 ; CHECK-NEXT:    ret double [[MUL]]
 ;
 entry:
-  %p1 = tail call double @llvm.powi.f64.i32(double %x, i32 %y)
+  %p1 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %y)
   tail call void @use(double %p1)
-  %p2 = tail call double @llvm.powi.f64.i32(double %x, i32 %z)
+  %p2 = tail call reassoc double @llvm.powi.f64.i32(double %x, i32 %z)
   %mul = fmul reassoc double %p1, %p2
   ret double %mul
 }



More information about the llvm-commits mailing list