[llvm] 8471c53 - Split fast-basictest.ll according to passes responsible for optimizations
Andrew Savonichev via llvm-commits
llvm-commits@lists.llvm.org
Fri Feb 4 01:22:39 PST 2022
Author: Daniil Kovalev
Date: 2022-02-04T12:20:10+03:00
New Revision: 8471c537d55d7bb4a5105063c14f18bc9ea966a6
URL: https://github.com/llvm/llvm-project/commit/8471c537d55d7bb4a5105063c14f18bc9ea966a6
DIFF: https://github.com/llvm/llvm-project/commit/8471c537d55d7bb4a5105063c14f18bc9ea966a6.diff
LOG: Split fast-basictest.ll according to passes responsible for optimizations
- add logically missing test cases.
- add appropriate comments.
- add appropriate TODOs.
See initial motivation in https://reviews.llvm.org/D117302
Differential Revision: https://reviews.llvm.org/D118769
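For reference, each split file can still be exercised on its own via its RUN line,
and the autogenerated assertions can be refreshed with utils/update_test_checks.py.
A rough sketch, assuming an llvm-project checkout with a locally built opt and
FileCheck under build/bin (adjust paths and binaries to your setup):

  # Run one split file and verify its CHECK lines (mirrors the file's RUN line):
  build/bin/opt < llvm/test/Transforms/InstCombine/fast-basictest.ll -instcombine -S \
    | build/bin/FileCheck llvm/test/Transforms/InstCombine/fast-basictest.ll

  # Regenerate the autogenerated CHECK lines after modifying a test:
  python llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
    llvm/test/Transforms/InstCombine/fast-basictest.ll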
Added:
llvm/test/Transforms/InstCombine/fast-basictest.ll
llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
llvm/test/Transforms/PhaseOrdering/fast-reassociate-gvn.ll
Modified:
llvm/test/Transforms/Reassociate/fast-basictest.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/InstCombine/fast-basictest.ll b/llvm/test/Transforms/InstCombine/fast-basictest.ll
new file mode 100644
index 0000000000000..8590c467627ef
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/fast-basictest.ll
@@ -0,0 +1,701 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;
+; Test numbering remains continuous across:
+; - InstCombine/fast-basictest.ll
+; - PhaseOrdering/fast-basictest.ll
+; - PhaseOrdering/fast-reassociate-gvn.ll
+; - Reassociate/fast-basictest.ll
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; With reassociation, constant folding can eliminate the 12 and -12 constants.
+
+define float @test1(float %arg) {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float [[ARG:%.*]]
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %t1 = fsub fast float -1.200000e+01, %arg
+ %t2 = fadd fast float %t1, 1.200000e+01
+ ret float %t2
+}
+
+; Check again using the minimal subset of FMF.
+; Both 'reassoc' and 'nsz' are required.
+define float @test1_minimal(float %arg) {
+; CHECK-LABEL: @test1_minimal(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc nsz float [[ARG:%.*]]
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %t1 = fsub reassoc nsz float -1.200000e+01, %arg
+ %t2 = fadd reassoc nsz float %t1, 1.200000e+01
+ ret float %t2
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test1_reassoc(float %arg) {
+; CHECK-LABEL: @test1_reassoc(
+; CHECK-NEXT: [[T1:%.*]] = fsub reassoc float -1.200000e+01, [[ARG:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[T1]], 1.200000e+01
+; CHECK-NEXT: ret float [[T2]]
+;
+ %t1 = fsub reassoc float -1.200000e+01, %arg
+ %t2 = fadd reassoc float %t1, 1.200000e+01
+ ret float %t2
+}
+
+; ((a + (-3)) + b) + 3 -> a + b
+; That only works with both instcombine and reassociate passes enabled.
+; Check that instcombine is not enough.
+
+define float @test2(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[REG115:%.*]] = fadd fast float [[REG109:%.*]], -3.000000e+01
+; CHECK-NEXT: [[REG116:%.*]] = fadd fast float [[REG115]], [[REG1111:%.*]]
+; CHECK-NEXT: [[REG117:%.*]] = fadd fast float [[REG116]], 3.000000e+01
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd fast float %reg109, -3.000000e+01
+ %reg116 = fadd fast float %reg115, %reg1111
+ %reg117 = fadd fast float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+define float @test2_no_FMF(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_no_FMF(
+; CHECK-NEXT: [[REG115:%.*]] = fadd float [[REG109:%.*]], -3.000000e+01
+; CHECK-NEXT: [[REG116:%.*]] = fadd float [[REG115]], [[REG1111:%.*]]
+; CHECK-NEXT: [[REG117:%.*]] = fadd float [[REG116]], 3.000000e+01
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd float %reg109, -3.000000e+01
+ %reg116 = fadd float %reg115, %reg1111
+ %reg117 = fadd float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+define float @test2_reassoc(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_reassoc(
+; CHECK-NEXT: [[REG115:%.*]] = fadd reassoc float [[REG109:%.*]], -3.000000e+01
+; CHECK-NEXT: [[REG116:%.*]] = fadd reassoc float [[REG115]], [[REG1111:%.*]]
+; CHECK-NEXT: [[REG117:%.*]] = fadd reassoc float [[REG116]], 3.000000e+01
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd reassoc float %reg109, -3.000000e+01
+ %reg116 = fadd reassoc float %reg115, %reg1111
+ %reg117 = fadd reassoc float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+; (-X)*Y + Z -> Z-X*Y
+
+define float @test7(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[TMP1]]
+; CHECK-NEXT: ret float [[C]]
+;
+ %A = fsub fast float 0.0, %X
+ %B = fmul fast float %A, %Y
+ %C = fadd fast float %B, %Z
+ ret float %C
+}
+
+define float @test7_unary_fneg(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[TMP1]]
+; CHECK-NEXT: ret float [[C]]
+;
+ %A = fneg fast float %X
+ %B = fmul fast float %A, %Y
+ %C = fadd fast float %B, %Z
+ ret float %C
+}
+
+define float @test7_reassoc_nsz(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fsub reassoc nsz float [[Z:%.*]], [[TMP1]]
+; CHECK-NEXT: ret float [[C]]
+;
+ %A = fsub reassoc nsz float 0.0, %X
+ %B = fmul reassoc nsz float %A, %Y
+ %C = fadd reassoc nsz float %B, %Z
+ ret float %C
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test7_reassoc(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_reassoc(
+; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 0.000000e+00, [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[A]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fadd reassoc float [[B]], [[Z:%.*]]
+; CHECK-NEXT: ret float [[C]]
+;
+ %A = fsub reassoc float 0.0, %X
+ %B = fmul reassoc float %A, %Y
+ %C = fadd reassoc float %B, %Z
+ ret float %C
+}
+
+define float @test8(float %X) {
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], 9.400000e+01
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %Y = fmul fast float %X, 4.700000e+01
+ %Z = fadd fast float %Y, %Y
+ ret float %Z
+}
+
+; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
+define float @test8_reassoc_nsz(float %X) {
+; CHECK-LABEL: @test8_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], 9.400000e+01
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %Y = fmul reassoc nsz float %X, 4.700000e+01
+ %Z = fadd reassoc nsz float %Y, %Y
+ ret float %Z
+}
+
+; TODO: This doesn't require 'nsz'. It should fold to X * 94.0
+define float @test8_reassoc(float %X) {
+; CHECK-LABEL: @test8_reassoc(
+; CHECK-NEXT: [[Y:%.*]] = fmul reassoc float [[X:%.*]], 4.700000e+01
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[Y]]
+; CHECK-NEXT: ret float [[Z]]
+;
+ %Y = fmul reassoc float %X, 4.700000e+01
+ %Z = fadd reassoc float %Y, %Y
+ ret float %Z
+}
+
+; Side note: (x + x + x) and (3*x) each have only a single rounding. So
+; transforming x+x+x to 3*x is always safe, even without any FMF.
+; To avoid that special-case, we have the addition of 'x' four times, here.
+
+define float @test9(float %X) {
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], 4.000000e+00
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %Y = fadd fast float %X ,%X
+ %Z = fadd fast float %Y, %X
+ %W = fadd fast float %Z, %X
+ ret float %W
+}
+
+; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
+define float @test9_reassoc_nsz(float %X) {
+; CHECK-LABEL: @test9_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], 4.000000e+00
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %Y = fadd reassoc nsz float %X ,%X
+ %Z = fadd reassoc nsz float %Y, %X
+ %W = fadd reassoc nsz float %Z, %X
+ ret float %W
+}
+
+; TODO: This doesn't require 'nsz'. It should fold to 4 * x
+define float @test9_reassoc(float %X) {
+; CHECK-LABEL: @test9_reassoc(
+; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[X:%.*]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[X]]
+; CHECK-NEXT: [[W:%.*]] = fadd reassoc float [[Z]], [[X]]
+; CHECK-NEXT: ret float [[W]]
+;
+ %Y = fadd reassoc float %X ,%X
+ %Z = fadd reassoc float %Y, %X
+ %W = fadd reassoc float %Z, %X
+ ret float %W
+}
+
+define float @test10(float %W) {
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[Z:%.*]] = fmul fast float [[W:%.*]], 3.810000e+02
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fmul fast float %W, 127.0
+ %Y = fadd fast float %X ,%X
+ %Z = fadd fast float %Y, %X
+ ret float %Z
+}
+
+; Check again using the minimal subset of FMF.
+; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
+define float @test10_reassoc_nsz(float %W) {
+; CHECK-LABEL: @test10_reassoc_nsz(
+; CHECK-NEXT: [[Z:%.*]] = fmul reassoc nsz float [[W:%.*]], 3.810000e+02
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fmul reassoc nsz float %W, 127.0
+ %Y = fadd reassoc nsz float %X ,%X
+ %Z = fadd reassoc nsz float %Y, %X
+ ret float %Z
+}
+
+; TODO: This doesn't require 'nsz'. It should fold to W*381.0.
+define float @test10_reassoc(float %W) {
+; CHECK-LABEL: @test10_reassoc(
+; CHECK-NEXT: [[X:%.*]] = fmul reassoc float [[W:%.*]], 1.270000e+02
+; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[X]], [[X]]
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[X]]
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fmul reassoc float %W, 127.0
+ %Y = fadd reassoc float %X ,%X
+ %Z = fadd reassoc float %Y, %X
+ ret float %Z
+}
+
+define float @test11(float %X) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], 3.000000e+00
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast float 6.000000e+00, [[TMP1]]
+; CHECK-NEXT: ret float [[TMP2]]
+;
+ %A = fsub fast float 1.000000e+00, %X
+ %B = fsub fast float 2.000000e+00, %X
+ %C = fsub fast float 3.000000e+00, %X
+ %Y = fadd fast float %A ,%B
+ %Z = fadd fast float %Y, %C
+ ret float %Z
+}
+
+; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
+define float @test11_reassoc_nsz(float %X) {
+; CHECK-LABEL: @test11_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], 3.000000e+00
+; CHECK-NEXT: [[TMP2:%.*]] = fsub reassoc nsz float 6.000000e+00, [[TMP1]]
+; CHECK-NEXT: ret float [[TMP2]]
+;
+ %A = fsub reassoc nsz float 1.000000e+00, %X
+ %B = fsub reassoc nsz float 2.000000e+00, %X
+ %C = fsub reassoc nsz float 3.000000e+00, %X
+ %Y = fadd reassoc nsz float %A ,%B
+ %Z = fadd reassoc nsz float %Y, %C
+ ret float %Z
+}
+
+; TODO: This doesn't require 'nsz'. It should fold to (6.0 - 3.0*x)
+define float @test11_reassoc(float %X) {
+; CHECK-LABEL: @test11_reassoc(
+; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 1.000000e+00, [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fsub reassoc float 2.000000e+00, [[X]]
+; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 3.000000e+00, [[X]]
+; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[A]], [[B]]
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[C]]
+; CHECK-NEXT: ret float [[Z]]
+;
+ %A = fsub reassoc float 1.000000e+00, %X
+ %B = fsub reassoc float 2.000000e+00, %X
+ %C = fsub reassoc float 3.000000e+00, %X
+ %Y = fadd reassoc float %A ,%B
+ %Z = fadd reassoc float %Y, %C
+ ret float %Z
+}
+
+define float @test12(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[TMP1]], [[X1:%.*]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %A = fsub fast float 0.000000e+00, %X1
+ %B = fmul fast float %A, %X2 ; -X1*X2
+ %C = fmul fast float %X1, %X3 ; X1*X3
+ %D = fadd fast float %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
+ ret float %D
+}
+
+define float @test12_unary_fneg(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[TMP1]], [[X1:%.*]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %A = fneg fast float %X1
+ %B = fmul fast float %A, %X2 ; -X1*X2
+ %C = fmul fast float %X1, %X3 ; X1*X3
+ %D = fadd fast float %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
+ ret float %D
+}
+
+define float @test12_reassoc_nsz(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[X3:%.*]], [[X2:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc nsz float [[TMP1]], [[X1:%.*]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %A = fsub reassoc nsz float 0.000000e+00, %X1
+ %B = fmul reassoc nsz float %A, %X2 ; -X1*X2
+ %C = fmul reassoc nsz float %X1, %X3 ; X1*X3
+ %D = fadd reassoc nsz float %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
+ ret float %D
+}
+
+; TODO: check if 'nsz' is technically required. Currently the optimization
+; is not done with only 'reassoc' without 'nsz'.
+define float @test12_reassoc(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_reassoc(
+; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 0.000000e+00, [[X1:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[A]], [[X2:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[X1]], [[X3:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd reassoc float [[B]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %A = fsub reassoc float 0.000000e+00, %X1
+ %B = fmul reassoc float %A, %X2 ; -X1*X2
+ %C = fmul reassoc float %X1, %X3 ; X1*X3
+ %D = fadd reassoc float %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
+ ret float %D
+}
+
+; (x1 * 47) + (x2 * -47) => (x1 - x2) * 47
+; That only works with both instcombine and reassociate passes enabled.
+; Check that instcombine is not enough.
+
+define float @test13(float %X1, float %X2) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[X2:%.*]], -4.700000e+01
+; CHECK-NEXT: [[D:%.*]] = fadd fast float [[B]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %B = fmul fast float %X1, 47. ; X1*47
+ %C = fmul fast float %X2, -47. ; X2*-47
+ %D = fadd fast float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+define float @test13_reassoc_nsz(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc_nsz(
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc nsz float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc nsz float [[X2:%.*]], -4.700000e+01
+; CHECK-NEXT: [[D:%.*]] = fadd reassoc nsz float [[B]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %B = fmul reassoc nsz float %X1, 47. ; X1*47
+ %C = fmul reassoc nsz float %X2, -47. ; X2*-47
+ %D = fadd reassoc nsz float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+define float @test13_reassoc(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc(
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[X2:%.*]], -4.700000e+01
+; CHECK-NEXT: [[D:%.*]] = fadd reassoc float [[B]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %B = fmul reassoc float %X1, 47. ; X1*47
+ %C = fmul reassoc float %X2, -47. ; X2*-47
+ %D = fadd reassoc float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+define float @test14(float %arg) {
+; CHECK-LABEL: @test14(
+; CHECK-NEXT: [[T2:%.*]] = fmul fast float [[ARG:%.*]], 1.440000e+02
+; CHECK-NEXT: ret float [[T2]]
+;
+ %t1 = fmul fast float 1.200000e+01, %arg
+ %t2 = fmul fast float %t1, 1.200000e+01
+ ret float %t2
+}
+
+define float @test14_reassoc(float %arg) {
+; CHECK-LABEL: @test14_reassoc(
+; CHECK-NEXT: [[T2:%.*]] = fmul reassoc float [[ARG:%.*]], 1.440000e+02
+; CHECK-NEXT: ret float [[T2]]
+;
+ %t1 = fmul reassoc float 1.200000e+01, %arg
+ %t2 = fmul reassoc float %t1, 1.200000e+01
+ ret float %t2
+}
+
+; (b+(a+1234))+-a -> b+1234
+; That only works with both instcombine and reassociate passes enabled.
+; Check that instcombine is not enough.
+
+define float @test15(float %b, float %a) {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
+; CHECK-NEXT: ret float [[TMP3]]
+;
+ %1 = fadd fast float %a, 1234.0
+ %2 = fadd fast float %b, %1
+ %3 = fsub fast float 0.0, %a
+ %4 = fadd fast float %2, %3
+ ret float %4
+}
+
+define float @test15_unary_fneg(float %b, float %a) {
+; CHECK-LABEL: @test15_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
+; CHECK-NEXT: ret float [[TMP3]]
+;
+ %1 = fadd fast float %a, 1234.0
+ %2 = fadd fast float %b, %1
+ %3 = fneg fast float %a
+ %4 = fadd fast float %2, %3
+ ret float %4
+}
+
+define float @test15_reassoc_nsz(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: ret float [[TMP3]]
+;
+ %1 = fadd reassoc nsz float %a, 1234.0
+ %2 = fadd reassoc nsz float %b, %1
+ %3 = fsub reassoc nsz float 0.0, %a
+ %4 = fadd reassoc nsz float %2, %3
+ ret float %4
+}
+
+define float @test15_reassoc(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP2]], [[TMP3]]
+; CHECK-NEXT: ret float [[TMP4]]
+;
+ %1 = fadd reassoc float %a, 1234.0
+ %2 = fadd reassoc float %b, %1
+ %3 = fsub reassoc float 0.0, %a
+ %4 = fadd reassoc float %2, %3
+ ret float %4
+}
+
+; X*-(Y*Z) -> X*-1*Y*Z
+; That only works with both instcombine and reassociate passes enabled.
+; Check that instcombine is not enough.
+
+define float @test16(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[C:%.*]] = fneg fast float [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul fast float [[D]], [[C]]
+; CHECK-NEXT: [[G:%.*]] = fmul fast float [[E]], -1.234500e+04
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fsub fast float 0.000000e+00, %z
+ %d = fmul fast float %a, %b
+ %e = fmul fast float %c, %d
+ %f = fmul fast float %e, 1.234500e+04
+ %g = fsub fast float 0.000000e+00, %f
+ ret float %g
+}
+
+define float @test16_unary_fneg(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_unary_fneg(
+; CHECK-NEXT: [[C:%.*]] = fneg fast float [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul fast float [[D]], [[C]]
+; CHECK-NEXT: [[G:%.*]] = fmul fast float [[E]], -1.234500e+04
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fneg fast float %z
+ %d = fmul fast float %a, %b
+ %e = fmul fast float %c, %d
+ %f = fmul fast float %e, 1.234500e+04
+ %g = fneg fast float %f
+ ret float %g
+}
+
+define float @test16_reassoc_nsz(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_reassoc_nsz(
+; CHECK-NEXT: [[C:%.*]] = fneg reassoc nsz float [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc nsz float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc nsz float [[D]], [[C]]
+; CHECK-NEXT: [[G:%.*]] = fmul reassoc nsz float [[E]], -1.234500e+04
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fsub reassoc nsz float 0.000000e+00, %z
+ %d = fmul reassoc nsz float %a, %b
+ %e = fmul reassoc nsz float %c, %d
+ %f = fmul reassoc nsz float %e, 1.234500e+04
+ %g = fsub reassoc nsz float 0.000000e+00, %f
+ ret float %g
+}
+
+define float @test16_reassoc(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_reassoc(
+; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[C]], [[D]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[E]], 1.234500e+04
+; CHECK-NEXT: [[G:%.*]] = fsub reassoc float 0.000000e+00, [[F]]
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fsub reassoc float 0.000000e+00, %z
+ %d = fmul reassoc float %a, %b
+ %e = fmul reassoc float %c, %d
+ %f = fmul reassoc float %e, 1.234500e+04
+ %g = fsub reassoc float 0.000000e+00, %f
+ ret float %g
+}
+
+define float @test17(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul fast float %z, 4.000000e+01
+ %c = fsub fast float 0.000000e+00, %d
+ %e = fmul fast float %a, %c
+ %f = fsub fast float 0.000000e+00, %e
+ ret float %f
+}
+
+define float @test17_unary_fneg(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul fast float %z, 4.000000e+01
+ %c = fneg fast float %d
+ %e = fmul fast float %a, %c
+ %f = fneg fast float %e
+ ret float %f
+}
+
+define float @test17_reassoc_nsz(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc nsz float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul reassoc nsz float %z, 4.000000e+01
+ %c = fsub reassoc nsz float 0.000000e+00, %d
+ %e = fmul reassoc nsz float %a, %c
+ %f = fsub reassoc nsz float 0.000000e+00, %e
+ ret float %f
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test17_reassoc(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_reassoc(
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[D]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fsub reassoc float 0.000000e+00, [[E]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul reassoc float %z, 4.000000e+01
+ %c = fsub reassoc float 0.000000e+00, %d
+ %e = fmul reassoc float %a, %c
+ %f = fsub reassoc float 0.000000e+00, %e
+ ret float %f
+}
+
+; fneg of fneg is an identity operation, so no FMF are needed to remove those instructions.
+
+define float @test17_unary_fneg_no_FMF(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_unary_fneg_no_FMF(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[F:%.*]] = fmul float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul float %z, 4.000000e+01
+ %c = fneg float %d
+ %e = fmul float %a, %c
+ %f = fneg float %e
+ ret float %f
+}
+
+define float @test17_reassoc_unary_fneg(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_reassoc_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: ret float [[F]]
+;
+ %d = fmul reassoc float %z, 4.000000e+01
+ %c = fneg reassoc float %d
+ %e = fmul reassoc float %a, %c
+ %f = fneg reassoc float %e
+ ret float %f
+}
+
+; With sub reassociation, constant folding can eliminate the 12 and -12 constants.
+; That only works with both instcombine and reassociate passes enabled.
+; Check that instcombine is not enough.
+
+define float @test18(float %A, float %B) {
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[X:%.*]] = fadd fast float [[A:%.*]], -1.200000e+01
+; CHECK-NEXT: [[Y:%.*]] = fsub fast float [[X]], [[B:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = fadd fast float [[Y]], 1.200000e+01
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fadd fast float -1.200000e+01, %A
+ %Y = fsub fast float %X, %B
+ %Z = fadd fast float %Y, 1.200000e+01
+ ret float %Z
+}
+
+define float @test18_reassoc(float %A, float %B) {
+; CHECK-LABEL: @test18_reassoc(
+; CHECK-NEXT: [[X:%.*]] = fadd reassoc float [[A:%.*]], -1.200000e+01
+; CHECK-NEXT: [[Y:%.*]] = fsub reassoc float [[X]], [[B:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], 1.200000e+01
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fadd reassoc float -1.200000e+01, %A
+ %Y = fsub reassoc float %X, %B
+ %Z = fadd reassoc float %Y, 1.200000e+01
+ ret float %Z
+}
+
+; With sub reassociation, constant folding can eliminate the uses of %a.
+
+define float @test19(float %a, float %b, float %c) nounwind {
+; CHECK-LABEL: @test19(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = fneg fast float [[TMP1]]
+; CHECK-NEXT: ret float [[T7]]
+;
+ %t3 = fsub fast float %a, %b
+ %t5 = fsub fast float %t3, %c
+ %t7 = fsub fast float %t5, %a
+ ret float %t7
+}
+
+define float @test19_reassoc_nsz(float %a, float %b, float %c) nounwind {
+; CHECK-LABEL: @test19_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = fneg reassoc nsz float [[TMP1]]
+; CHECK-NEXT: ret float [[T7]]
+;
+ %t3 = fsub reassoc nsz float %a, %b
+ %t5 = fsub reassoc nsz float %t3, %c
+ %t7 = fsub reassoc nsz float %t5, %a
+ ret float %t7
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test19_reassoc(float %a, float %b, float %c) nounwind {
+; CHECK-LABEL: @test19_reassoc(
+; CHECK-NEXT: [[T3:%.*]] = fsub reassoc float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = fsub reassoc float [[T3]], [[C:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = fsub reassoc float [[T5]], [[A]]
+; CHECK-NEXT: ret float [[T7]]
+;
+ %t3 = fsub reassoc float %a, %b
+ %t5 = fsub reassoc float %t3, %c
+ %t7 = fsub reassoc float %t5, %a
+ ret float %t7
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
new file mode 100644
index 0000000000000..f44ee73652a56
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
@@ -0,0 +1,307 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;
+; Test cases in this file are intended to be run with both reassociate and
+; instcombine passes enabled.
+;
+; Test numbering remains continuous across:
+; - InstCombine/fast-basictest.ll
+; - PhaseOrdering/fast-basictest.ll
+; - PhaseOrdering/fast-reassociate-gvn.ll
+; - Reassociate/fast-basictest.ll
+;
+; RUN: opt < %s -reassociate -instcombine -S | FileCheck %s --check-prefixes=CHECK,REASSOC_AND_IC --allow-unused-prefixes
+; RUN: opt < %s -O2 -S | FileCheck %s --check-prefixes=CHECK,O2 --allow-unused-prefixes
+
+; test2 ... test18 - both the reassociate and instcombine passes are required
+; to perform the transform.
+
+; ((a + (-3)) + b) + 3 -> a + b
+
+define float @test2(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[REG117:%.*]] = fadd fast float [[REG109:%.*]], [[REG1111:%.*]]
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd fast float %reg109, -3.000000e+01
+ %reg116 = fadd fast float %reg115, %reg1111
+ %reg117 = fadd fast float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+; Verify the fold is not done without 'fast'.
+define float @test2_no_FMF(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_no_FMF(
+; CHECK-NEXT: [[REG115:%.*]] = fadd float [[REG109:%.*]], -3.000000e+01
+; CHECK-NEXT: [[REG116:%.*]] = fadd float [[REG115]], [[REG1111:%.*]]
+; CHECK-NEXT: [[REG117:%.*]] = fadd float [[REG116]], 3.000000e+01
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd float %reg109, -3.000000e+01
+ %reg116 = fadd float %reg115, %reg1111
+ %reg117 = fadd float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+define float @test2_reassoc(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_reassoc(
+; CHECK-NEXT: [[REG115:%.*]] = fadd reassoc float [[REG109:%.*]], -3.000000e+01
+; CHECK-NEXT: [[REG116:%.*]] = fadd reassoc float [[REG115]], [[REG1111:%.*]]
+; CHECK-NEXT: [[REG117:%.*]] = fadd reassoc float [[REG116]], 3.000000e+01
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd reassoc float %reg109, -3.000000e+01
+ %reg116 = fadd reassoc float %reg115, %reg1111
+ %reg117 = fadd reassoc float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+; (x1 * 47) + (x2 * -47) => (x1 - x2) * 47
+
+define float @test13(float %X1, float %X2) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X1:%.*]], [[X2:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[TMP1]], 4.700000e+01
+; CHECK-NEXT: ret float [[TMP2]]
+;
+ %B = fmul fast float %X1, 47. ; X1*47
+ %C = fmul fast float %X2, -47. ; X2*-47
+ %D = fadd fast float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
+define float @test13_reassoc_nsz(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[X1:%.*]], [[X2:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nsz float [[TMP1]], 4.700000e+01
+; CHECK-NEXT: ret float [[TMP2]]
+;
+ %B = fmul reassoc nsz float %X1, 47. ; X1*47
+ %C = fmul reassoc nsz float %X2, -47. ; X2*-47
+ %D = fadd reassoc nsz float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+; TODO: This doesn't require 'nsz'. It should fold to ((x1 - x2) * 47.0)
+define float @test13_reassoc(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc(
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[X2:%.*]], 4.700000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc float [[B]], [[C]]
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %B = fmul reassoc float %X1, 47. ; X1*47
+ %C = fmul reassoc float %X2, -47. ; X2*-47
+ %D = fadd reassoc float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ ret float %D
+}
+
+; (b+(a+1234))+-a -> b+1234
+
+define float @test15(float %b, float %a) {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %1 = fadd fast float %a, 1234.0
+ %2 = fadd fast float %b, %1
+ %3 = fsub fast float 0.0, %a
+ %4 = fadd fast float %2, %3
+ ret float %4
+}
+
+define float @test15_unary_fneg(float %b, float %a) {
+; CHECK-LABEL: @test15_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
+; CHECK-NEXT: ret float [[TMP1]]
+;
+ %1 = fadd fast float %a, 1234.0
+ %2 = fadd fast float %b, %1
+ %3 = fneg fast float %a
+ %4 = fadd fast float %2, %3
+ ret float %4
+}
+
+; TODO: check if it is possible to perform the optimization with only 'reassoc'
+; and 'nsz' (i.e. without 'fast').
+define float @test15_reassoc_nsz(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: ret float [[TMP3]]
+;
+ %1 = fadd reassoc nsz float %a, 1234.0
+ %2 = fadd reassoc nsz float %b, %1
+ %3 = fsub reassoc nsz float 0.0, %a
+ %4 = fadd reassoc nsz float %2, %3
+ ret float %4
+}
+
+define float @test15_reassoc(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP3]], [[TMP2]]
+; CHECK-NEXT: ret float [[TMP4]]
+;
+ %1 = fadd reassoc float %a, 1234.0
+ %2 = fadd reassoc float %b, %1
+ %3 = fsub reassoc float 0.0, %a
+ %4 = fadd reassoc float %2, %3
+ ret float %4
+}
+
+; Test that we can turn things like X*-(Y*Z) -> X*-1*Y*Z.
+
+define float @test16(float %a, float %b, float %z) {
+; REASSOC_AND_IC-LABEL: @test16(
+; REASSOC_AND_IC-NEXT: [[C:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; REASSOC_AND_IC-NEXT: [[E:%.*]] = fmul fast float [[C]], [[B:%.*]]
+; REASSOC_AND_IC-NEXT: [[F:%.*]] = fmul fast float [[E]], [[Z:%.*]]
+; REASSOC_AND_IC-NEXT: ret float [[F]]
+;
+; O2-LABEL: @test16(
+; O2-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; O2-NEXT: [[E:%.*]] = fmul fast float [[D]], [[B:%.*]]
+; O2-NEXT: [[G:%.*]] = fmul fast float [[E]], [[Z:%.*]]
+; O2-NEXT: ret float [[G]]
+;
+ %c = fsub fast float 0.000000e+00, %z
+ %d = fmul fast float %a, %b
+ %e = fmul fast float %c, %d
+ %f = fmul fast float %e, 1.234500e+04
+ %g = fsub fast float 0.000000e+00, %f
+ ret float %g
+}
+
+define float @test16_unary_fneg(float %a, float %b, float %z) {
+; REASSOC_AND_IC-LABEL: @test16_unary_fneg(
+; REASSOC_AND_IC-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; REASSOC_AND_IC-NEXT: [[F:%.*]] = fmul fast float [[E]], [[B:%.*]]
+; REASSOC_AND_IC-NEXT: [[G:%.*]] = fmul fast float [[F]], [[Z:%.*]]
+; REASSOC_AND_IC-NEXT: ret float [[G]]
+;
+; O2-LABEL: @test16_unary_fneg(
+; O2-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; O2-NEXT: [[E:%.*]] = fmul fast float [[D]], [[B:%.*]]
+; O2-NEXT: [[G:%.*]] = fmul fast float [[E]], [[Z:%.*]]
+; O2-NEXT: ret float [[G]]
+;
+ %c = fneg fast float %z
+ %d = fmul fast float %a, %b
+ %e = fmul fast float %c, %d
+ %f = fmul fast float %e, 1.234500e+04
+ %g = fneg fast float %f
+ ret float %g
+}
+
+; TODO: check if it is possible to perform the optimization with only 'reassoc'
+; and 'nsz' (i.e. without 'fast').
+define float @test16_reassoc_nsz(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_reassoc_nsz(
+; CHECK-NEXT: [[C:%.*]] = fneg reassoc nsz float [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc nsz float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc nsz float [[D]], [[C]]
+; CHECK-NEXT: [[G:%.*]] = fmul reassoc nsz float [[E]], -1.234500e+04
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fsub reassoc nsz float 0.000000e+00, %z
+ %d = fmul reassoc nsz float %a, %b
+ %e = fmul reassoc nsz float %c, %d
+ %f = fmul reassoc nsz float %e, 1.234500e+04
+ %g = fsub reassoc nsz float 0.000000e+00, %f
+ ret float %g
+}
+
+define float @test16_reassoc(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_reassoc(
+; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[Z:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[D]], [[C]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[E]], 1.234500e+04
+; CHECK-NEXT: [[G:%.*]] = fsub reassoc float 0.000000e+00, [[F]]
+; CHECK-NEXT: ret float [[G]]
+;
+ %c = fsub reassoc float 0.000000e+00, %z
+ %d = fmul reassoc float %a, %b
+ %e = fmul reassoc float %c, %d
+ %f = fmul reassoc float %e, 1.234500e+04
+ %g = fsub reassoc float 0.000000e+00, %f
+ ret float %g
+}
+
+; With sub reassociation, constant folding can eliminate the 12 and -12 constants.
+
+define float @test18(float %A, float %B) {
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[Z:%.*]] = fsub fast float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fadd fast float -1.200000e+01, %A
+ %Y = fsub fast float %X, %B
+ %Z = fadd fast float %Y, 1.200000e+01
+ ret float %Z
+}
+
+define float @test18_reassoc(float %A, float %B) {
+; CHECK-LABEL: @test18_reassoc(
+; CHECK-NEXT: [[X:%.*]] = fadd reassoc float [[A:%.*]], -1.200000e+01
+; CHECK-NEXT: [[Y:%.*]] = fsub reassoc float [[X]], [[B:%.*]]
+; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], 1.200000e+01
+; CHECK-NEXT: ret float [[Z]]
+;
+ %X = fadd reassoc float -1.200000e+01, %A
+ %Y = fsub reassoc float %X, %B
+ %Z = fadd reassoc float %Y, 1.200000e+01
+ ret float %Z
+}
+
+; test18 - check that the bug described in the revision does not appear:
+; https://reviews.llvm.org/D72521
+
+; With sub reassociation, constant folding can eliminate the uses of %a.
+
+define float @test19(float %a, float %b, float %c) nounwind {
+; REASSOC_AND_IC-LABEL: @test19(
+; REASSOC_AND_IC-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]]
+; REASSOC_AND_IC-NEXT: [[T7:%.*]] = fneg fast float [[TMP1]]
+; REASSOC_AND_IC-NEXT: ret float [[T7]]
+;
+; O2-LABEL: @test19(
+; O2-NEXT: [[TMP1:%.*]] = fadd fast float [[C:%.*]], [[B:%.*]]
+; O2-NEXT: [[T7:%.*]] = fneg fast float [[TMP1]]
+; O2-NEXT: ret float [[T7]]
+;
+ %t3 = fsub fast float %a, %b
+ %t5 = fsub fast float %t3, %c
+ %t7 = fsub fast float %t5, %a
+ ret float %t7
+}
+
+define float @test19_reassoc_nsz(float %a, float %b, float %c) nounwind {
+; CHECK-LABEL: @test19_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[B:%.*]], [[C:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = fneg reassoc nsz float [[TMP1]]
+; CHECK-NEXT: ret float [[T7]]
+;
+ %t3 = fsub reassoc nsz float %a, %b
+ %t5 = fsub reassoc nsz float %t3, %c
+ %t7 = fsub reassoc nsz float %t5, %a
+ ret float %t7
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test19_reassoc(float %a, float %b, float %c) nounwind {
+; CHECK-LABEL: @test19_reassoc(
+; CHECK-NEXT: [[T3:%.*]] = fsub reassoc float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[T5:%.*]] = fsub reassoc float [[T3]], [[C:%.*]]
+; CHECK-NEXT: [[T7:%.*]] = fsub reassoc float [[T5]], [[A]]
+; CHECK-NEXT: ret float [[T7]]
+;
+ %t3 = fsub reassoc float %a, %b
+ %t5 = fsub reassoc float %t3, %c
+ %t7 = fsub reassoc float %t5, %a
+ ret float %t7
+}
diff --git a/llvm/test/Transforms/PhaseOrdering/fast-reassociate-gvn.ll b/llvm/test/Transforms/PhaseOrdering/fast-reassociate-gvn.ll
new file mode 100644
index 0000000000000..11c4d7afbcacb
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/fast-reassociate-gvn.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;
+; Test cases in this file are intended to be run with both reassociate and
+; gvn passes enabled.
+;
+; Test numbering remains continuous across:
+; - InstCombine/fast-basictest.ll
+; - PhaseOrdering/fast-basictest.ll
+; - PhaseOrdering/fast-reassociate-gvn.ll
+; - Reassociate/fast-basictest.ll
+;
+; RUN: opt < %s -reassociate -gvn -S | FileCheck %s --check-prefixes=CHECK,REASSOC_AND_GVN --allow-unused-prefixes
+; RUN: opt < %s -O2 -S | FileCheck %s --check-prefixes=CHECK,O2 --allow-unused-prefixes
+
+@fe = external global float
+@fa = external global float
+@fb = external global float
+@fc = external global float
+@ff = external global float
+
+; If two sums of the same operands in different order are computed with the
+; 'fast' flag and then stored to global variables, we can reuse the same value twice.
+; Sums:
+; - test3: (a+b)+c and (a+c)+b
+; - test4: c+(a+b) and (c+a)+b
+; - test5: c+(b+a) and (c+a)+b
+; TODO: check if 'reassoc' flag is technically enough for this optimization
+; (currently the transformation is not done with 'reassoc' only).
+
+define void @test3() {
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
+; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
+; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
+; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: store float [[T2]], float* @fe, align 4
+; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: ret void
+;
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
+ %t1 = fadd fast float %A, %B
+ %t2 = fadd fast float %t1, %C
+ %t3 = fadd fast float %A, %C
+ %t4 = fadd fast float %t3, %B
+ ; e = (a+b)+c;
+ store float %t2, float* @fe
+ ; f = (a+c)+b
+ store float %t4, float* @ff
+ ret void
+}
+
+define void @test4() {
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
+; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
+; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
+; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: store float [[T2]], float* @fe, align 4
+; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: ret void
+;
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
+ %t1 = fadd fast float %A, %B
+ %t2 = fadd fast float %C, %t1
+ %t3 = fadd fast float %C, %A
+ %t4 = fadd fast float %t3, %B
+ ; e = c+(a+b)
+ store float %t2, float* @fe
+ ; f = (c+a)+b
+ store float %t4, float* @ff
+ ret void
+}
+
+define void @test5() {
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
+; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
+; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
+; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: store float [[T2]], float* @fe, align 4
+; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: ret void
+;
+ %A = load float, float* @fa
+ %B = load float, float* @fb
+ %C = load float, float* @fc
+ %t1 = fadd fast float %B, %A
+ %t2 = fadd fast float %C, %t1
+ %t3 = fadd fast float %C, %A
+ %t4 = fadd fast float %t3, %B
+ ; e = c+(b+a)
+ store float %t2, float* @fe
+ ; f = (c+a)+b
+ store float %t4, float* @ff
+ ret void
+}
diff --git a/llvm/test/Transforms/Reassociate/fast-basictest.ll b/llvm/test/Transforms/Reassociate/fast-basictest.ll
index 561a406daa2c6..038c8c8adbe9e 100644
--- a/llvm/test/Transforms/Reassociate/fast-basictest.ll
+++ b/llvm/test/Transforms/Reassociate/fast-basictest.ll
@@ -1,43 +1,32 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -reassociate -gvn -instcombine -S | FileCheck %s
-
-; With reassociation, constant folding can eliminate the 12 and -12 constants.
-define float @test1(float %arg) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: [[ARG_NEG:%.*]] = fneg fast float [[ARG:%.*]]
-; CHECK-NEXT: ret float [[ARG_NEG]]
;
- %t1 = fsub fast float -1.200000e+01, %arg
- %t2 = fadd fast float %t1, 1.200000e+01
- ret float %t2
-}
-
-; Check again using the minimal subset of FMF.
-; Both 'reassoc' and 'nsz' are required.
-define float @test1_minimal(float %arg) {
-; CHECK-LABEL: @test1_minimal(
-; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc nsz float [[ARG:%.*]]
-; CHECK-NEXT: ret float [[TMP1]]
+; Test numbering remains continuous across:
+; - InstCombine/fast-basictest.ll
+; - PhaseOrdering/fast-basictest.ll
+; - PhaseOrdering/fast-reassociate-gvn.ll
+; - Reassociate/fast-basictest.ll
;
- %t1 = fsub reassoc nsz float -1.200000e+01, %arg
- %t2 = fadd reassoc nsz float %t1, 1.200000e+01
- ret float %t2
-}
+; RUN: opt < %s -reassociate -S | FileCheck %s
-; Verify the fold is not done with only 'reassoc' ('nsz' is required).
-define float @test1_reassoc(float %arg) {
-; CHECK-LABEL: @test1_reassoc(
-; CHECK-NEXT: [[T1:%.*]] = fsub reassoc float -1.200000e+01, [[ARG:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[T1]], 1.200000e+01
-; CHECK-NEXT: ret float [[T2]]
-;
- %t1 = fsub reassoc float -1.200000e+01, %arg
- %t2 = fadd reassoc float %t1, 1.200000e+01
- ret float %t2
-}
+; ((a + (-3)) + b) + 3 -> a + b
+; That only works with both instcombine and reassociate passes enabled.
+; Check that reassociate is not enough.
+; TODO: check if we can eliminate zero add.
define float @test2(float %reg109, float %reg1111) {
; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[REG116:%.*]] = fadd fast float [[REG109:%.*]], 0.000000e+00
+; CHECK-NEXT: [[REG117:%.*]] = fadd fast float [[REG116]], [[REG1111:%.*]]
+; CHECK-NEXT: ret float [[REG117]]
+;
+ %reg115 = fadd fast float %reg109, -3.000000e+01
+ %reg116 = fadd fast float %reg115, %reg1111
+ %reg117 = fadd fast float %reg116, 3.000000e+01
+ ret float %reg117
+}
+
+define float @test2_no_FMF(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_no_FMF(
; CHECK-NEXT: [[REG115:%.*]] = fadd float [[REG109:%.*]], -3.000000e+01
; CHECK-NEXT: [[REG116:%.*]] = fadd float [[REG115]], [[REG1111:%.*]]
; CHECK-NEXT: [[REG117:%.*]] = fadd float [[REG116]], 3.000000e+01
@@ -49,19 +38,8 @@ define float @test2(float %reg109, float %reg1111) {
ret float %reg117
}
-define float @test3(float %reg109, float %reg1111) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[REG117:%.*]] = fadd fast float [[REG109:%.*]], [[REG1111:%.*]]
-; CHECK-NEXT: ret float [[REG117]]
-;
- %reg115 = fadd fast float %reg109, -3.000000e+01
- %reg116 = fadd fast float %reg115, %reg1111
- %reg117 = fadd fast float %reg116, 3.000000e+01
- ret float %reg117
-}
-
-define float @test3_reassoc(float %reg109, float %reg1111) {
-; CHECK-LABEL: @test3_reassoc(
+define float @test2_reassoc(float %reg109, float %reg1111) {
+; CHECK-LABEL: @test2_reassoc(
; CHECK-NEXT: [[REG115:%.*]] = fadd reassoc float [[REG109:%.*]], -3.000000e+01
; CHECK-NEXT: [[REG116:%.*]] = fadd reassoc float [[REG115]], [[REG1111:%.*]]
; CHECK-NEXT: [[REG117:%.*]] = fadd reassoc float [[REG116]], 3.000000e+01
@@ -79,15 +57,27 @@ define float @test3_reassoc(float %reg109, float %reg1111) {
@fc = external global float
@ff = external global float
-define void @test4() {
-; CHECK-LABEL: @test4(
+; If two sums of the same operands in different order are computed with the
+; 'fast' flag and then stored to global variables, we can reuse the same value twice.
+; Sums:
+; - test3: (a+b)+c and (a+c)+b
+; - test4: c+(a+b) and (c+a)+b
+; - test5: c+(b+a) and (c+a)+b
+;
+; That only works with both gvn and reassociate passes enabled.
+; Check that reassociate is not enough.
+
+define void @test3() {
+; CHECK-LABEL: @test3(
; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: [[T3:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T4:%.*]] = fadd fast float [[T3]], [[C]]
; CHECK-NEXT: store float [[T2]], float* @fe, align 4
-; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: store float [[T4]], float* @ff, align 4
; CHECK-NEXT: ret void
;
%A = load float, float* @fa
@@ -95,7 +85,7 @@ define void @test4() {
%C = load float, float* @fc
%t1 = fadd fast float %A, %B
%t2 = fadd fast float %t1, %C
- %t3 = fadd fast float %C, %A
+ %t3 = fadd fast float %A, %C
%t4 = fadd fast float %t3, %B
; e = (a+b)+c;
store float %t2, float* @fe
@@ -104,22 +94,24 @@ define void @test4() {
ret void
}
-define void @test5() {
-; CHECK-LABEL: @test5(
+define void @test4() {
+; CHECK-LABEL: @test4(
; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: [[T3:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T4:%.*]] = fadd fast float [[T3]], [[C]]
; CHECK-NEXT: store float [[T2]], float* @fe, align 4
-; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: store float [[T4]], float* @ff, align 4
; CHECK-NEXT: ret void
;
%A = load float, float* @fa
%B = load float, float* @fb
%C = load float, float* @fc
%t1 = fadd fast float %A, %B
- %t2 = fadd fast float %t1, %C
+ %t2 = fadd fast float %C, %t1
%t3 = fadd fast float %C, %A
%t4 = fadd fast float %t3, %B
; e = c+(a+b)
@@ -129,22 +121,24 @@ define void @test5() {
ret void
}
-define void @test6() {
-; CHECK-LABEL: @test6(
+define void @test5() {
+; CHECK-LABEL: @test5(
; CHECK-NEXT: [[A:%.*]] = load float, float* @fa, align 4
; CHECK-NEXT: [[B:%.*]] = load float, float* @fb, align 4
; CHECK-NEXT: [[C:%.*]] = load float, float* @fc, align 4
; CHECK-NEXT: [[T1:%.*]] = fadd fast float [[B]], [[A]]
; CHECK-NEXT: [[T2:%.*]] = fadd fast float [[T1]], [[C]]
+; CHECK-NEXT: [[T3:%.*]] = fadd fast float [[B]], [[A]]
+; CHECK-NEXT: [[T4:%.*]] = fadd fast float [[T3]], [[C]]
; CHECK-NEXT: store float [[T2]], float* @fe, align 4
-; CHECK-NEXT: store float [[T2]], float* @ff, align 4
+; CHECK-NEXT: store float [[T4]], float* @ff, align 4
; CHECK-NEXT: ret void
;
%A = load float, float* @fa
%B = load float, float* @fb
%C = load float, float* @fc
%t1 = fadd fast float %B, %A
- %t2 = fadd fast float %t1, %C
+ %t2 = fadd fast float %C, %t1
%t3 = fadd fast float %C, %A
%t4 = fadd fast float %t3, %B
; e = c+(b+a)
@@ -154,8 +148,8 @@ define void @test6() {
ret void
}
-define float @test7(float %A, float %B, float %C) {
-; CHECK-LABEL: @test7(
+define float @test6(float %A, float %B, float %C) {
+; CHECK-LABEL: @test6(
; CHECK-NEXT: [[REASS_ADD1:%.*]] = fadd fast float [[C:%.*]], [[B:%.*]]
; CHECK-NEXT: [[REASS_MUL2:%.*]] = fmul fast float [[A:%.*]], [[A]]
; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_MUL2]], [[REASS_ADD1]]
@@ -169,12 +163,12 @@ define float @test7(float %A, float %B, float %C) {
ret float %r
}
-define float @test7_reassoc(float %A, float %B, float %C) {
-; CHECK-LABEL: @test7_reassoc(
+define float @test6_reassoc(float %A, float %B, float %C) {
+; CHECK-LABEL: @test6_reassoc(
; CHECK-NEXT: [[AA:%.*]] = fmul reassoc float [[A:%.*]], [[A]]
; CHECK-NEXT: [[AAB:%.*]] = fmul reassoc float [[AA]], [[B:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[A]], [[A]]
-; CHECK-NEXT: [[AAC:%.*]] = fmul reassoc float [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[AC:%.*]] = fmul reassoc float [[A]], [[C:%.*]]
+; CHECK-NEXT: [[AAC:%.*]] = fmul reassoc float [[A]], [[AC]]
; CHECK-NEXT: [[R:%.*]] = fadd reassoc float [[AAB]], [[AAC]]
; CHECK-NEXT: ret float [[R]]
;
@@ -187,12 +181,16 @@ define float @test7_reassoc(float %A, float %B, float %C) {
}
; (-X)*Y + Z -> Z-X*Y
+; TODO: check why the IR produced for test7 with the 'fast' math flag is worse
+; than without it (and even worse than with no transformation at all).
-define float @test8(float %X, float %Y, float %Z) {
-; CHECK-LABEL: @test8(
+define float @test7(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float 0.000000e+00, 0.000000e+00
; CHECK-NEXT: [[A:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[A]]
-; CHECK-NEXT: ret float [[C]]
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[A]], 1.000000e+00
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast float [[Z:%.*]], [[B]]
+; CHECK-NEXT: ret float [[TMP2]]
;
%A = fsub fast float 0.0, %X
%B = fmul fast float %A, %Y
@@ -200,11 +198,13 @@ define float @test8(float %X, float %Y, float %Z) {
ret float %C
}
-define float @test8_unary_fneg(float %X, float %Y, float %Z) {
-; CHECK-LABEL: @test8_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[C:%.*]] = fsub fast float [[Z:%.*]], [[TMP1]]
-; CHECK-NEXT: ret float [[C]]
+define float @test7_unary_fneg(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float 0.000000e+00
+; CHECK-NEXT: [[A:%.*]] = fmul fast float [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[A]], 1.000000e+00
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast float [[Z:%.*]], [[B]]
+; CHECK-NEXT: ret float [[TMP2]]
;
%A = fneg fast float %X
%B = fmul fast float %A, %Y
@@ -212,8 +212,22 @@ define float @test8_unary_fneg(float %X, float %Y, float %Z) {
ret float %C
}
-define float @test8_reassoc(float %X, float %Y, float %Z) {
-; CHECK-LABEL: @test8_reassoc(
+define float @test7_reassoc_nsz(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_reassoc_nsz(
+; CHECK-NEXT: [[A:%.*]] = fsub reassoc nsz float 0.000000e+00, [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc nsz float [[A]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fadd reassoc nsz float [[B]], [[Z:%.*]]
+; CHECK-NEXT: ret float [[C]]
+;
+ %A = fsub reassoc nsz float 0.0, %X
+ %B = fmul reassoc nsz float %A, %Y
+ %C = fadd reassoc nsz float %B, %Z
+ ret float %C
+}
+
+; Verify the fold is not done with only 'reassoc' ('nsz' is required).
+define float @test7_reassoc(float %X, float %Y, float %Z) {
+; CHECK-LABEL: @test7_reassoc(
; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 0.000000e+00, [[X:%.*]]
; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[A]], [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = fadd reassoc float [[B]], [[Z:%.*]]
@@ -225,8 +239,8 @@ define float @test8_reassoc(float %X, float %Y, float %Z) {
ret float %C
}
-define float @test9(float %X) {
-; CHECK-LABEL: @test9(
+define float @test8(float %X) {
+; CHECK-LABEL: @test8(
; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[X:%.*]], 9.400000e+01
; CHECK-NEXT: ret float [[FACTOR]]
;
@@ -235,34 +249,11 @@ define float @test9(float %X) {
ret float %Z
}
-; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
-define float @test9_reassoc_nsz(float %X) {
-; CHECK-LABEL: @test9_reassoc_nsz(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], 9.400000e+01
-; CHECK-NEXT: ret float [[TMP1]]
-;
- %Y = fmul reassoc nsz float %X, 4.700000e+01
- %Z = fadd reassoc nsz float %Y, %Y
- ret float %Z
-}
-
-; TODO: This doesn't require 'nsz'. It should fold to X * 94.0
-define float @test9_reassoc(float %X) {
-; CHECK-LABEL: @test9_reassoc(
-; CHECK-NEXT: [[Y:%.*]] = fmul reassoc float [[X:%.*]], 4.700000e+01
-; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[Y]]
-; CHECK-NEXT: ret float [[Z]]
-;
- %Y = fmul reassoc float %X, 4.700000e+01
- %Z = fadd reassoc float %Y, %Y
- ret float %Z
-}
-
; Side note: (x + x + x) and (3*x) each have only a single rounding. So
; transforming x+x+x to 3*x is always safe, even without any FMF.
; To avoid that special-case, we have the addition of 'x' four times, here.
-define float @test10(float %X) {
-; CHECK-LABEL: @test10(
+define float @test9(float %X) {
+; CHECK-LABEL: @test9(
; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[X:%.*]], 4.000000e+00
; CHECK-NEXT: ret float [[FACTOR]]
;
@@ -272,34 +263,8 @@ define float @test10(float %X) {
ret float %W
}
-; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
-define float @test10_reassoc_nsz(float %X) {
-; CHECK-LABEL: @test10_reassoc_nsz(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], 4.000000e+00
-; CHECK-NEXT: ret float [[TMP1]]
-;
- %Y = fadd reassoc nsz float %X ,%X
- %Z = fadd reassoc nsz float %Y, %X
- %W = fadd reassoc nsz float %Z, %X
- ret float %W
-}
-
-; TODO: This doesn't require 'nsz'. It should fold to 4 * x
-define float @test10_reassoc(float %X) {
-; CHECK-LABEL: @test10_reassoc(
-; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[X:%.*]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], [[X]]
-; CHECK-NEXT: [[W:%.*]] = fadd reassoc float [[Z]], [[X]]
-; CHECK-NEXT: ret float [[W]]
-;
- %Y = fadd reassoc float %X ,%X
- %Z = fadd reassoc float %Y, %X
- %W = fadd reassoc float %Z, %X
- ret float %W
-}
-
-define float @test11(float %W) {
-; CHECK-LABEL: @test11(
+define float @test10(float %W) {
+; CHECK-LABEL: @test10(
; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[W:%.*]], 3.810000e+02
; CHECK-NEXT: ret float [[FACTOR]]
;
@@ -309,35 +274,9 @@ define float @test11(float %W) {
ret float %Z
}
-; Check again using the minimal subset of FMF.
-; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
-define float @test11_reassoc_nsz(float %W) {
-; CHECK-LABEL: @test11_reassoc_nsz(
-; CHECK-NEXT: [[Z:%.*]] = fmul reassoc nsz float [[W:%.*]], 3.810000e+02
-; CHECK-NEXT: ret float [[Z]]
-;
- %X = fmul reassoc nsz float %W, 127.0
- %Y = fadd reassoc nsz float %X ,%X
- %Z = fadd reassoc nsz float %Y, %X
- ret float %Z
-}
-
-; TODO: This doesn't require 'nsz'. It should fold to W*381.0.
-define float @test11_reassoc(float %W) {
-; CHECK-LABEL: @test11_reassoc(
-; CHECK-NEXT: [[X:%.*]] = fmul reassoc float [[W:%.*]], 1.270000e+02
-; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[X]], [[X]]
-; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[X]], [[Y]]
-; CHECK-NEXT: ret float [[Z]]
-;
- %X = fmul reassoc float %W, 127.0
- %Y = fadd reassoc float %X ,%X
- %Z = fadd reassoc float %Y, %X
- ret float %Z
-}
-
-define float @test12(float %X) {
-; CHECK-LABEL: @test12(
+define float @test11(float %X) {
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float 0.000000e+00
; CHECK-NEXT: [[FACTOR:%.*]] = fmul fast float [[X:%.*]], -3.000000e+00
; CHECK-NEXT: [[Z:%.*]] = fadd fast float [[FACTOR]], 6.000000e+00
; CHECK-NEXT: ret float [[Z]]
@@ -350,44 +289,17 @@ define float @test12(float %X) {
ret float %Z
}
-; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
-define float @test12_reassoc_nsz(float %X) {
-; CHECK-LABEL: @test12_reassoc_nsz(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[X:%.*]], -3.000000e+00
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[TMP1]], 6.000000e+00
-; CHECK-NEXT: ret float [[TMP2]]
-;
- %A = fsub reassoc nsz float 1.000000e+00, %X
- %B = fsub reassoc nsz float 2.000000e+00, %X
- %C = fsub reassoc nsz float 3.000000e+00, %X
- %Y = fadd reassoc nsz float %A ,%B
- %Z = fadd reassoc nsz float %Y, %C
- ret float %Z
-}
-
-; TODO: This doesn't require 'nsz'. It should fold to (6.0 - 3.0*x)
-define float @test12_reassoc(float %X) {
-; CHECK-LABEL: @test12_reassoc(
-; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 1.000000e+00, [[X:%.*]]
-; CHECK-NEXT: [[B:%.*]] = fsub reassoc float 2.000000e+00, [[X]]
-; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 3.000000e+00, [[X]]
-; CHECK-NEXT: [[Y:%.*]] = fadd reassoc float [[A]], [[B]]
-; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[C]], [[Y]]
-; CHECK-NEXT: ret float [[Z]]
-;
- %A = fsub reassoc float 1.000000e+00, %X
- %B = fsub reassoc float 2.000000e+00, %X
- %C = fsub reassoc float 3.000000e+00, %X
- %Y = fadd reassoc float %A ,%B
- %Z = fadd reassoc float %Y, %C
- ret float %Z
-}
+; TODO: check why the IR of test12 transformed with the 'fast' math flag
+; is worse than the IR transformed without it (and even worse than the untransformed IR)
-define float @test13(float %X1, float %X2, float %X3) {
-; CHECK-LABEL: @test13(
-; CHECK-NEXT: [[REASS_ADD:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]]
-; CHECK-NEXT: [[REASS_MUL:%.*]] = fmul fast float [[REASS_ADD]], [[X1:%.*]]
-; CHECK-NEXT: ret float [[REASS_MUL]]
+define float @test12(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: [[A:%.*]] = fmul fast float [[X2:%.*]], [[X1:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[A]], 1.000000e+00
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[X3:%.*]], [[X1]]
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast float [[C]], [[B]]
+; CHECK-NEXT: ret float [[TMP2]]
;
%A = fsub fast float 0.000000e+00, %X1
%B = fmul fast float %A, %X2 ; -X1*X2
@@ -396,11 +308,14 @@ define float @test13(float %X1, float %X2, float %X3) {
ret float %D
}
-define float @test13_unary_fneg(float %X1, float %X2, float %X3) {
-; CHECK-LABEL: @test13_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X3:%.*]], [[X2:%.*]]
-; CHECK-NEXT: [[D:%.*]] = fmul fast float [[TMP1]], [[X1:%.*]]
-; CHECK-NEXT: ret float [[D]]
+define float @test12_unary_fneg(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float 0.000000e+00
+; CHECK-NEXT: [[A:%.*]] = fmul fast float [[X2:%.*]], [[X1:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[A]], 1.000000e+00
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[X3:%.*]], [[X1]]
+; CHECK-NEXT: [[TMP2:%.*]] = fsub fast float [[C]], [[B]]
+; CHECK-NEXT: ret float [[TMP2]]
;
%A = fneg fast float %X1
%B = fmul fast float %A, %X2 ; -X1*X2
@@ -409,8 +324,25 @@ define float @test13_unary_fneg(float %X1, float %X2, float %X3) {
ret float %D
}
-define float @test13_reassoc(float %X1, float %X2, float %X3) {
-; CHECK-LABEL: @test13_reassoc(
+define float @test12_reassoc_nsz(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_reassoc_nsz(
+; CHECK-NEXT: [[A:%.*]] = fsub reassoc nsz float 0.000000e+00, [[X1:%.*]]
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc nsz float [[A]], [[X2:%.*]]
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc nsz float [[X1]], [[X3:%.*]]
+; CHECK-NEXT: [[D:%.*]] = fadd reassoc nsz float [[B]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %A = fsub reassoc nsz float 0.000000e+00, %X1
+ %B = fmul reassoc nsz float %A, %X2 ; -X1*X2
+ %C = fmul reassoc nsz float %X1, %X3 ; X1*X3
+ %D = fadd reassoc nsz float %B, %C ; -X1*X2 + X1*X3 -> X1*(X3-X2)
+ ret float %D
+}
+
+; TODO: check whether 'nsz' is technically required. Currently the optimization
+; is not performed with 'reassoc' alone (without 'nsz').
+define float @test12_reassoc(float %X1, float %X2, float %X3) {
+; CHECK-LABEL: @test12_reassoc(
; CHECK-NEXT: [[A:%.*]] = fsub reassoc float 0.000000e+00, [[X1:%.*]]
; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[A]], [[X2:%.*]]
; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[X1]], [[X3:%.*]]
@@ -424,48 +356,51 @@ define float @test13_reassoc(float %X1, float %X2, float %X3) {
ret float %D
}
-define float @test14(float %X1, float %X2) {
-; CHECK-LABEL: @test14(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[X1:%.*]], [[X2:%.*]]
-; CHECK-NEXT: [[D1:%.*]] = fmul fast float [[TMP1]], 4.700000e+01
-; CHECK-NEXT: ret float [[D1]]
+; (x1 * 47) + (x2 * -47) => (x1 - x2) * 47
+; That only works with both instcombine and reassociate passes enabled.
+; Check that reassociate is not enough.
+
+define float @test13(float %X1, float %X2) {
+; CHECK-LABEL: @test13(
+; CHECK-NEXT: [[B:%.*]] = fmul fast float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[X2:%.*]], 4.700000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[B]], [[C]]
+; CHECK-NEXT: ret float [[TMP1]]
;
%B = fmul fast float %X1, 47. ; X1*47
%C = fmul fast float %X2, -47. ; X2*-47
- %D = fadd fast float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ %D = fadd fast float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
ret float %D
}
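
For reference, the fully factored form encoded by the removed expectations above — and presumably what the combined pipeline is still checked for in PhaseOrdering/fast-basictest.ll — looks roughly like the following sketch (the function name is made up and the autogenerated value names will differ):

  define float @test13_factored_sketch(float %X1, float %X2) {
    %t = fsub fast float %X1, %X2           ; x1 - x2
    %r = fmul fast float %t, 4.700000e+01   ; (x1 - x2) * 47
    ret float %r
  }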
-; (x1 * 47) + (x2 * -47) => (x1 - x2) * 47
-; Check again with 'reassoc' and 'nsz' ('nsz' not technically required).
-define float @test14_reassoc_nsz(float %X1, float %X2) {
-; CHECK-LABEL: @test14_reassoc_nsz(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[X1:%.*]], [[X2:%.*]]
-; CHECK-NEXT: [[D1:%.*]] = fmul reassoc nsz float [[TMP1]], 4.700000e+01
-; CHECK-NEXT: ret float [[D1]]
+define float @test13_reassoc_nsz(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc_nsz(
+; CHECK-NEXT: [[B:%.*]] = fmul reassoc nsz float [[X1:%.*]], 4.700000e+01
+; CHECK-NEXT: [[C:%.*]] = fmul reassoc nsz float [[X2:%.*]], 4.700000e+01
+; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[B]], [[C]]
+; CHECK-NEXT: ret float [[TMP1]]
;
%B = fmul reassoc nsz float %X1, 47. ; X1*47
%C = fmul reassoc nsz float %X2, -47. ; X2*-47
- %D = fadd reassoc nsz float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ %D = fadd reassoc nsz float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
ret float %D
}
-; TODO: This doesn't require 'nsz'. It should fold to ((x1 - x2) * 47.0)
-define float @test14_reassoc(float %X1, float %X2) {
-; CHECK-LABEL: @test14_reassoc(
+define float @test13_reassoc(float %X1, float %X2) {
+; CHECK-LABEL: @test13_reassoc(
; CHECK-NEXT: [[B:%.*]] = fmul reassoc float [[X1:%.*]], 4.700000e+01
; CHECK-NEXT: [[C:%.*]] = fmul reassoc float [[X2:%.*]], 4.700000e+01
-; CHECK-NEXT: [[D1:%.*]] = fsub reassoc float [[B]], [[C]]
-; CHECK-NEXT: ret float [[D1]]
+; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc float [[B]], [[C]]
+; CHECK-NEXT: ret float [[TMP1]]
;
%B = fmul reassoc float %X1, 47. ; X1*47
%C = fmul reassoc float %X2, -47. ; X2*-47
- %D = fadd reassoc float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
+ %D = fadd reassoc float %B, %C ; X1*47 + X2*-47 -> 47*(X1-X2)
ret float %D
}
-define float @test15(float %arg) {
-; CHECK-LABEL: @test15(
+define float @test14(float %arg) {
+; CHECK-LABEL: @test14(
; CHECK-NEXT: [[T2:%.*]] = fmul fast float [[ARG:%.*]], 1.440000e+02
; CHECK-NEXT: ret float [[T2]]
;
@@ -474,9 +409,12 @@ define float @test15(float %arg) {
ret float %t2
}
-define float @test15_reassoc(float %arg) {
-; CHECK-LABEL: @test15_reassoc(
-; CHECK-NEXT: [[T2:%.*]] = fmul reassoc float [[ARG:%.*]], 1.440000e+02
+; TODO: check if we can transform the code with 'reassoc' only.
+; The same IR is transformed into a single fmul by the instcombine pass.
+define float @test14_reassoc(float %arg) {
+; CHECK-LABEL: @test14_reassoc(
+; CHECK-NEXT: [[T1:%.*]] = fmul reassoc float [[ARG:%.*]], 1.200000e+01
+; CHECK-NEXT: [[T2:%.*]] = fmul reassoc float [[T1]], 1.200000e+01
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fmul reassoc float 1.200000e+01, %arg
@@ -485,10 +423,15 @@ define float @test15_reassoc(float %arg) {
}
; (b+(a+1234))+-a -> b+1234
-define float @test16(float %b, float %a) {
-; CHECK-LABEL: @test16(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
-; CHECK-NEXT: ret float [[TMP1]]
+; That only works with both instcombine and reassociate passes enabled.
+; Check that reassociate is not enough.
+
+; TODO: check if we can remove dead fsub.
+define float @test15(float %b, float %a) {
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float 0.000000e+00, [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
+; CHECK-NEXT: ret float [[TMP2]]
;
%1 = fadd fast float %a, 1234.0
%2 = fadd fast float %b, %1
@@ -497,10 +440,12 @@ define float @test16(float %b, float %a) {
ret float %4
}
-define float @test16_unary_fneg(float %b, float %a) {
-; CHECK-LABEL: @test16_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
-; CHECK-NEXT: ret float [[TMP1]]
+; TODO: check if we can remove dead fneg.
+define float @test15_unary_fneg(float %b, float %a) {
+; CHECK-LABEL: @test15_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], 1.234000e+03
+; CHECK-NEXT: ret float [[TMP2]]
;
%1 = fadd fast float %a, 1234.0
%2 = fadd fast float %b, %1
@@ -509,10 +454,25 @@ define float @test16_unary_fneg(float %b, float %a) {
ret float %4
}
-define float @test16_reassoc(float %b, float %a) {
-; CHECK-LABEL: @test16_reassoc(
+define float @test15_reassoc_nsz(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc_nsz(
+; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[A:%.*]], 1.234000e+03
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[B:%.*]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc nsz float 0.000000e+00, [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc nsz float [[TMP3]], [[TMP2]]
+; CHECK-NEXT: ret float [[TMP4]]
+;
+ %1 = fadd reassoc nsz float %a, 1234.0
+ %2 = fadd reassoc nsz float %b, %1
+ %3 = fsub reassoc nsz float 0.0, %a
+ %4 = fadd reassoc nsz float %2, %3
+ ret float %4
+}
+
+define float @test15_reassoc(float %b, float %a) {
+; CHECK-LABEL: @test15_reassoc(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[B:%.*]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret float [[TMP4]]
@@ -525,13 +485,17 @@ define float @test16_reassoc(float %b, float %a) {
}
; Test that we can turn things like X*-(Y*Z) -> X*-1*Y*Z.
+; That only works with both instcombine and reassociate passes enabled.
+; Check that reassociate is not enough.
-define float @test17(float %a, float %b, float %z) {
-; CHECK-LABEL: @test17(
-; CHECK-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[B:%.*]]
-; CHECK-NEXT: [[G:%.*]] = fmul fast float [[F]], [[Z:%.*]]
-; CHECK-NEXT: ret float [[G]]
+define float @test16(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; CHECK-NEXT: [[E:%.*]] = fmul fast float [[C]], [[B:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[F]], 0.000000e+00
+; CHECK-NEXT: ret float [[TMP2]]
;
%c = fsub fast float 0.000000e+00, %z
%d = fmul fast float %a, %b
@@ -541,12 +505,13 @@ define float @test17(float %a, float %b, float %z) {
ret float %g
}
-define float @test17_unary_fneg(float %a, float %b, float %z) {
-; CHECK-LABEL: @test17_unary_fneg(
-; CHECK-NEXT: [[D:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
-; CHECK-NEXT: [[E:%.*]] = fmul fast float [[D]], [[B:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[E]], [[Z:%.*]]
-; CHECK-NEXT: ret float [[TMP1]]
+define float @test16_unary_fneg(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float 0.000000e+00
+; CHECK-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 1.234500e+04
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[B:%.*]]
+; CHECK-NEXT: [[G:%.*]] = fmul fast float [[F]], [[Z:%.*]]
+; CHECK-NEXT: ret float [[G]]
;
%c = fneg fast float %z
%d = fmul fast float %a, %b
@@ -556,8 +521,8 @@ define float @test17_unary_fneg(float %a, float %b, float %z) {
ret float %g
}
-define float @test17_reassoc(float %a, float %b, float %z) {
-; CHECK-LABEL: @test17_reassoc(
+define float @test16_reassoc(float %a, float %b, float %z) {
+; CHECK-LABEL: @test16_reassoc(
; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[Z:%.*]]
; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[D]], [[C]]
@@ -573,11 +538,17 @@ define float @test17_reassoc(float %a, float %b, float %z) {
ret float %g
}
-define float @test18(float %a, float %b, float %z) {
-; CHECK-LABEL: @test18(
-; CHECK-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[Z:%.*]]
-; CHECK-NEXT: ret float [[F]]
+; TODO: check if we can remove:
+; - fsub fast 0, 0
+; - fadd fast x, 0
+; ... as 'fast' implies 'nsz'
+define float @test17(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17(
+; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: [[C:%.*]] = fmul fast float [[A:%.*]], 4.000000e+01
+; CHECK-NEXT: [[E:%.*]] = fmul fast float [[C]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[E]], 0.000000e+00
+; CHECK-NEXT: ret float [[TMP2]]
;
%d = fmul fast float %z, 4.000000e+01
%c = fsub fast float 0.000000e+00, %d
@@ -586,8 +557,10 @@ define float @test18(float %a, float %b, float %z) {
ret float %f
}
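
Regarding the TODO above: the LangRef defines 'fast' as implying all of the individual fast-math flags, 'nsz' included, so in principle both leftover operations are removable; x + 0.0 -> x only needs 'nsz'. A minimal sketch of the kind of IR in question (hypothetical function, not taken from the patch):

  define float @sketch_fold_zero_ops(float %x) {
    %dead = fsub fast float 0.000000e+00, 0.000000e+00  ; constant-folds to 0.0 and is unused
    %r = fadd fast float %x, 0.000000e+00               ; x + 0.0 -> x is legal given 'nsz' (which 'fast' implies)
    ret float %r
  }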
-define float @test18_unary_fneg(float %a, float %b, float %z) {
-; CHECK-LABEL: @test18_unary_fneg(
+; TODO: check if we can remove fneg fast 0 as 'fast' implies 'nsz'
+define float @test17_unary_fneg(float %a, float %b, float %z) {
+; CHECK-LABEL: @test17_unary_fneg(
+; CHECK-NEXT: [[TMP1:%.*]] = fneg fast float 0.000000e+00
; CHECK-NEXT: [[E:%.*]] = fmul fast float [[A:%.*]], 4.000000e+01
; CHECK-NEXT: [[F:%.*]] = fmul fast float [[E]], [[Z:%.*]]
; CHECK-NEXT: ret float [[F]]
@@ -599,53 +572,13 @@ define float @test18_unary_fneg(float %a, float %b, float %z) {
ret float %f
}
-define float @test18_reassoc(float %a, float %b, float %z) {
-; CHECK-LABEL: @test18_reassoc(
-; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[D]]
-; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[C]], [[A:%.*]]
-; CHECK-NEXT: [[F:%.*]] = fsub reassoc float 0.000000e+00, [[E]]
-; CHECK-NEXT: ret float [[F]]
-;
- %d = fmul reassoc float %z, 4.000000e+01
- %c = fsub reassoc float 0.000000e+00, %d
- %e = fmul reassoc float %a, %c
- %f = fsub reassoc float 0.000000e+00, %e
- ret float %f
-}
-
-; fneg of fneg is an identity operation, so no FMF are needed to remove those instructions.
-
-define float @test18_unary_fneg_no_FMF(float %a, float %b, float %z) {
-; CHECK-LABEL: @test18_unary_fneg_no_FMF(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul float [[TMP1]], [[A:%.*]]
-; CHECK-NEXT: ret float [[F]]
-;
- %d = fmul float %z, 4.000000e+01
- %c = fneg float %d
- %e = fmul float %a, %c
- %f = fneg float %e
- ret float %f
-}
-
-define float @test18_reassoc_unary_fneg(float %a, float %b, float %z) {
-; CHECK-LABEL: @test18_reassoc_unary_fneg(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[TMP1]], [[A:%.*]]
-; CHECK-NEXT: ret float [[F]]
-;
- %d = fmul reassoc float %z, 4.000000e+01
- %c = fneg reassoc float %d
- %e = fmul reassoc float %a, %c
- %f = fneg reassoc float %e
- ret float %f
-}
-
; With sub reassociation, constant folding can eliminate the 12 and -12 constants.
-define float @test19(float %A, float %B) {
-; CHECK-LABEL: @test19(
-; CHECK-NEXT: [[Z:%.*]] = fsub fast float [[A:%.*]], [[B:%.*]]
+; TODO: check if we can remove fadd fast x, 0 as 'fast' implies 'nsz'
+define float @test18(float %A, float %B) {
+; CHECK-LABEL: @test18(
+; CHECK-NEXT: [[B_NEG:%.*]] = fneg fast float [[B:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = fadd fast float [[A:%.*]], 0.000000e+00
+; CHECK-NEXT: [[Z:%.*]] = fadd fast float [[Y]], [[B_NEG]]
; CHECK-NEXT: ret float [[Z]]
;
%X = fadd fast float -1.200000e+01, %A
@@ -653,43 +586,3 @@ define float @test19(float %A, float %B) {
%Z = fadd fast float %Y, 1.200000e+01
ret float %Z
}
-
-define float @test19_reassoc(float %A, float %B) {
-; CHECK-LABEL: @test19_reassoc(
-; CHECK-NEXT: [[X:%.*]] = fadd reassoc float [[A:%.*]], -1.200000e+01
-; CHECK-NEXT: [[Y:%.*]] = fsub reassoc float [[X]], [[B:%.*]]
-; CHECK-NEXT: [[Z:%.*]] = fadd reassoc float [[Y]], 1.200000e+01
-; CHECK-NEXT: ret float [[Z]]
-;
- %X = fadd reassoc float -1.200000e+01, %A
- %Y = fsub reassoc float %X, %B
- %Z = fadd reassoc float %Y, 1.200000e+01
- ret float %Z
-}
-
-; With sub reassociation, constant folding can eliminate the uses of %a.
-define float @test20(float %a, float %b, float %c) nounwind {
-; CHECK-LABEL: @test20(
-; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], [[C:%.*]]
-; CHECK-NEXT: [[T7:%.*]] = fneg fast float [[TMP1]]
-; CHECK-NEXT: ret float [[T7]]
-;
- %t3 = fsub fast float %a, %b
- %t5 = fsub fast float %t3, %c
- %t7 = fsub fast float %t5, %a
- ret float %t7
-}
-
-define float @test20_reassoc(float %a, float %b, float %c) nounwind {
-; CHECK-LABEL: @test20_reassoc(
-; CHECK-NEXT: [[T3:%.*]] = fsub reassoc float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[T5:%.*]] = fsub reassoc float [[T3]], [[C:%.*]]
-; CHECK-NEXT: [[T7:%.*]] = fsub reassoc float [[T5]], [[A]]
-; CHECK-NEXT: ret float [[T7]]
-;
- %t3 = fsub reassoc float %a, %b
- %t5 = fsub reassoc float %t3, %c
- %t7 = fsub reassoc float %t5, %a
- ret float %t7
-}
-