[llvm] [InstCombine] Fold (X * 0.0) * constant => X * 0.0 #85241 (PR #92512)

via llvm-commits llvm-commits at lists.llvm.org
Wed May 22 08:09:52 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-transforms

Author: None (SahilPatidar)

<details>
<summary>Changes</summary>

Fix #85241

---
Full diff: https://github.com/llvm/llvm-project/pull/92512.diff


2 Files Affected:

- (modified) llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp (+10) 
- (modified) llvm/test/Transforms/InstCombine/fmul.ll (+247) 


``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index ca1b1921404d8..8d71ac8716fba 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -905,6 +905,16 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
     }
   }
 
+  // (X * 0.0) * constant => X * 0.0
+  if (match(Op0, m_FMul(m_Value(X), m_AnyZeroFP())) &&
+      match(Op1, m_Constant(C))) {
+    Constant *C1 = cast<Constant>(cast<Instruction>(Op0)->getOperand(1));
+    if (Constant *CC1 =
+            ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL)) {
+      return BinaryOperator::CreateFMulFMF(X, CC1, I.getFastMathFlags());
+    }
+  }
+
   // Simplify FMUL recurrences starting with 0.0 to 0.0 if nnan and nsz are set.
   // Given a phi node with entry value as 0 and it used in fmul operation,
   // we can replace fmul with 0 safely and eleminate loop operation.
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 1526956c5b241..e7f2fb6b41559 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1407,3 +1407,250 @@ entry:
   %ret = fmul <3 x float> %a, <float -0.0, float 0.0, float poison>
   ret <3 x float> %ret
 }
+
+define <2 x float> @mul_pos_zero_neg_const_vec(<2 x float> %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_vec(
+; CHECK-NEXT:    [[F2:%.*]] = fmul <2 x float> [[A:%.*]], <float -0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT:    ret <2 x float> [[F2]]
+;
+  %f1 = fmul <2 x float> %a, <float 0.000000, float 0.000000>
+  %f2 = fmul <2 x float> %f1, <float -1.000000, float -1.000000>
+  ret <2 x float> %f2
+}
+
+define <2 x float> @mul_pos_zero_mixed_neg_const_vec(<2 x float> %a) {
+; CHECK-LABEL: @mul_pos_zero_mixed_neg_const_vec(
+; CHECK-NEXT:    [[F2:%.*]] = fmul <2 x float> [[A:%.*]], <float 0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT:    ret <2 x float> [[F2]]
+;
+  %f1 = fmul <2 x float> %a, <float 0.000000, float 0.000000>
+  %f2 = fmul <2 x float> %f1, <float 1.000000, float -1.000000>
+  ret <2 x float> %f2
+}
+
+define <2 x float> @mul_neg_zero_mixed_const_vec(<2 x float> %a) {
+; CHECK-LABEL: @mul_neg_zero_mixed_const_vec(
+; CHECK-NEXT:    [[F2:%.*]] = fmul <2 x float> [[A:%.*]], <float -0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT:    ret <2 x float> [[F2]]
+;
+  %f1 = fmul <2 x float> %a, <float -0.000000, float 0.000000>
+  %f2 = fmul <2 x float> %f1, <float 1.000000, float -1.000000>
+  ret <2 x float> %f2
+}
+
+define <2 x float> @mul_neg_zero_mixed_const_vec_ninf(<2 x float> %a) {
+; CHECK-LABEL: @mul_neg_zero_mixed_const_vec_ninf(
+; CHECK-NEXT:    [[F2:%.*]] = fmul ninf <2 x float> [[A:%.*]], <float -0.000000e+00, float -0.000000e+00>
+; CHECK-NEXT:    ret <2 x float> [[F2]]
+;
+  %f1 = fmul ninf <2 x float> %a, <float -0.000000, float 0.000000>
+  %f2 = fmul ninf <2 x float> %f1, <float 1.000000, float -1.000000>
+  ret <2 x float> %f2
+}
+
+define <2 x float> @mul_neg_zero_mixed_const_vec_nnan(<2 x float> %a) {
+; CHECK-LABEL: @mul_neg_zero_mixed_const_vec_nnan(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nnan <2 x float> [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call nnan <2 x float> @llvm.copysign.v2f32(<2 x float> zeroinitializer, <2 x float> [[TMP1]])
+; CHECK-NEXT:    ret <2 x float> [[F2]]
+;
+  %f1 = fmul nnan <2 x float> %a, <float -0.000000, float 0.000000>
+  %f2 = fmul nnan <2 x float> %f1, <float 1.000000, float -1.000000>
+  ret <2 x float> %f2
+}
+
+define float @mul_pos_zero_neg_const(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const(
+; CHECK-NEXT:    [[F2:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul float %a, 0.000000
+  %f2 = fmul float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_pos_const(float %a) {
+; CHECK-LABEL: @mul_neg_zero_pos_const(
+; CHECK-NEXT:    [[F1:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F1]]
+;
+  %f1 = fmul float %a, -0.000000
+  %f2 = fmul float %f1, 1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_neg_const(float %a) {
+; CHECK-LABEL: @mul_neg_zero_neg_const(
+; CHECK-NEXT:    [[F2:%.*]] = fmul float [[A:%.*]], 0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul float %a, -0.000000
+  %f2 = fmul float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_pos_const_ninf(float %a) {
+; CHECK-LABEL: @mul_neg_zero_pos_const_ninf(
+; CHECK-NEXT:    [[F1:%.*]] = fmul ninf float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F1]]
+;
+  %f1 = fmul ninf float %a, -0.000000
+  %f2 = fmul ninf float %f1, 1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_neg_const_ninf(float %a) {
+; CHECK-LABEL: @mul_neg_zero_neg_const_ninf(
+; CHECK-NEXT:    [[F2:%.*]] = fmul ninf float [[A:%.*]], 0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul ninf float %a, -0.000000
+  %f2 = fmul ninf float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_ninf(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_ninf(
+; CHECK-NEXT:    [[F2:%.*]] = fmul ninf float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul ninf float %a, 0.000000
+  %f2 = fmul ninf float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_pos_const_nnan(float %a) {
+; CHECK-LABEL: @mul_neg_zero_pos_const_nnan(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT:    [[F1:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F1]]
+;
+  %f1 = fmul nnan float %a, -0.000000
+  %f2 = fmul nnan float %f1, 1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_neg_const_nnan(float %a) {
+; CHECK-LABEL: @mul_neg_zero_neg_const_nnan(
+; CHECK-NEXT:    [[F2:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[A:%.*]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul nnan float %a, -0.000000
+  %f2 = fmul nnan float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_nnan(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_nnan(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul nnan float %a, 0.000000
+  %f2 = fmul nnan float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_nnan_res(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_nnan_res(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul float %a, 0.000000
+  %f2 = fmul nnan float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_const_with_nnan_fmul_result(float %a) {
+; CHECK-LABEL: @mul_neg_const_with_nnan_fmul_result(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg float [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul nnan float %a, 0.000000
+  %f2 = fmul float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_ninf_res(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_ninf_res(
+; CHECK-NEXT:    [[F2:%.*]] = fmul ninf float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul float %a, 0.000000
+  %f2 = fmul ninf float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_const_with_ninf_fmul_result(float %a) {
+; CHECK-LABEL: @mul_neg_const_with_ninf_fmul_result(
+; CHECK-NEXT:    [[F2:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul ninf float %a, 0.000000
+  %f2 = fmul float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_with_mixed_fmf_test1(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_with_mixed_fmf_test1(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg nnan float [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call nnan float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul ninf float %a, 0.000000
+  %f2 = fmul nnan float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_pos_zero_neg_const_with_mixed_fmf_test2(float %a) {
+; CHECK-LABEL: @mul_pos_zero_neg_const_with_mixed_fmf_test2(
+; CHECK-NEXT:    [[TMP1:%.*]] = fneg float [[A:%.*]]
+; CHECK-NEXT:    [[F2:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul nnan float %a, 0.000000
+  %f2 = fmul ninf float %f1, -1.000000
+  ret float %f2
+}
+
+define float @mul_neg_zero_const_expr(float %a) {
+; CHECK-LABEL: @mul_neg_zero_const_expr(
+; CHECK-NEXT:    [[F3:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    ret float [[F3]]
+;
+  %f1 = fmul float %a, -0.000000e+00
+  %i1 = inttoptr i32 1000 to ptr
+  %i = ptrtoint ptr %i1 to i32
+  %f2 = bitcast i32 %i to float
+  %f3 = fmul float %f1, %f2
+  ret float %f3
+}
+
+define float @mul_neg_zero_expr(float %a, ptr %b) {
+; CHECK-LABEL: @mul_neg_zero_expr(
+; CHECK-NEXT:    [[F1:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B:%.*]] to i64
+; CHECK-NEXT:    [[I:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    [[F2:%.*]] = bitcast i32 [[I]] to float
+; CHECK-NEXT:    [[F3:%.*]] = fmul float [[F1]], [[F2]]
+; CHECK-NEXT:    ret float [[F3]]
+;
+  %f1 = fmul float %a, -0.000000e+00
+  %i = ptrtoint ptr %b to i32
+  %f2 = bitcast i32 %i to float
+  %f3 = fmul float %f1, %f2
+  ret float %f3
+}
+
+define float @mul_neg_zero_expr2(float %a, ptr %b) {
+; CHECK-LABEL: @mul_neg_zero_expr2(
+; CHECK-NEXT:    [[F1:%.*]] = fmul float [[A:%.*]], -0.000000e+00
+; CHECK-NEXT:    [[F2:%.*]] = fmul float [[F1]], bitcast (i32 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 1, i32 0, i64 0) to i32) to float)
+; CHECK-NEXT:    ret float [[F2]]
+;
+  %f1 = fmul float %a, -0.000000e+00
+  %f2 = fmul float %f1, bitcast (i32 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, i32 0, i64 2) to i32) to float)
+  ret float %f2
+}

``````````

</details>


https://github.com/llvm/llvm-project/pull/92512


More information about the llvm-commits mailing list