[llvm] [DAGCombiner] Eliminate fp casts if we have the right fast math flags (PR #131345)
John Brawn via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 25 07:56:59 PDT 2025
https://github.com/john-brawn-arm updated https://github.com/llvm/llvm-project/pull/131345
>From 4c6fd631680495e0689ad07a989bf0820163e1b0 Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Wed, 26 Feb 2025 17:54:24 +0000
Subject: [PATCH 1/2] [DAGCombiner] Eliminate fp casts if we have the right
fast math flags
When floating-point operations are legalized to operations of a higher
precision (e.g. an f16 fadd being legalized to an f32 fadd), we end up
with a narrowing cast followed by a widening cast between each pair of
operations. With the appropriate fast math flags (nnan ninf contract)
we can eliminate these casts.
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 45 ++++
llvm/test/CodeGen/AArch64/f16-instructions.ll | 16 +-
llvm/test/CodeGen/AArch64/fmla.ll | 7 +-
llvm/test/CodeGen/AArch64/fp16_fast_math.ll | 109 +++++++++
...e-streaming-mode-fixed-length-fp-reduce.ll | 172 +++++---------
llvm/test/CodeGen/AArch64/vecreduce-fadd.ll | 216 +++++++-----------
llvm/test/CodeGen/AArch64/vecreduce-fmul.ll | 148 +++++-------
llvm/test/CodeGen/AMDGPU/llvm.exp.ll | 6 -
llvm/test/CodeGen/AMDGPU/llvm.exp10.ll | 6 -
llvm/test/CodeGen/AMDGPU/llvm.exp2.ll | 6 -
llvm/test/CodeGen/AMDGPU/llvm.log.ll | 28 +--
llvm/test/CodeGen/AMDGPU/llvm.log10.ll | 28 +--
llvm/test/CodeGen/AMDGPU/llvm.log2.ll | 20 --
llvm/test/CodeGen/ARM/fp16_fast_math.ll | 149 +++++++++++-
llvm/test/CodeGen/Thumb2/bf16-instructions.ll | 25 +-
15 files changed, 519 insertions(+), 462 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c35838601cc9c..c16e2f0bc865b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18455,7 +18455,45 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
return SDValue();
}
+// Eliminate a floating-point widening of a narrowed value if the fast math
+// flags allow it.
+static SDValue eliminateFPCastPair(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ unsigned NarrowingOp;
+ switch (N->getOpcode()) {
+ case ISD::FP16_TO_FP:
+ NarrowingOp = ISD::FP_TO_FP16;
+ break;
+ case ISD::BF16_TO_FP:
+ NarrowingOp = ISD::FP_TO_BF16;
+ break;
+ case ISD::FP_EXTEND:
+ NarrowingOp = ISD::FP_ROUND;
+ break;
+ default:
+ llvm_unreachable("Expected widening FP cast");
+ }
+
+ if (N0.getOpcode() == NarrowingOp && N0.getOperand(0).getValueType() == VT) {
+ const SDNodeFlags SrcFlags = N0->getFlags();
+ const SDNodeFlags DstFlags = N->getFlags();
+ // Narrowing can introduce inf and change the encoding of a nan, so the
+ // destination must have the nnan and ninf flags to indicate that we don't
+ // need to care about that. We are also removing a rounding step, and that
+ // requires both the source and destination to allow contraction.
+ if (DstFlags.hasNoNaNs() && DstFlags.hasNoInfs() &&
+ SrcFlags.hasAllowContract() && DstFlags.hasAllowContract()) {
+ return N0.getOperand(0);
+ }
+ }
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
SDLoc DL(N);
@@ -18507,6 +18545,9 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
+ if (SDValue CastEliminated = eliminateFPCastPair(N))
+ return CastEliminated;
+
return SDValue();
}
@@ -27209,6 +27250,7 @@ SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {
}
SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
+ SelectionDAG::FlagInserter FlagsInserter(DAG, N);
auto Op = N->getOpcode();
assert((Op == ISD::FP16_TO_FP || Op == ISD::BF16_TO_FP) &&
"opcode should be FP16_TO_FP or BF16_TO_FP.");
@@ -27223,6 +27265,9 @@ SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
}
}
+ if (SDValue CastEliminated = eliminateFPCastPair(N))
+ return CastEliminated;
+
// Sometimes constants manage to survive very late in the pipeline, e.g.,
// because they are wrapped inside the <1 x f16> type. Try one last time to
// get rid of them.
diff --git a/llvm/test/CodeGen/AArch64/f16-instructions.ll b/llvm/test/CodeGen/AArch64/f16-instructions.ll
index 5460a376931a5..adc536da26f26 100644
--- a/llvm/test/CodeGen/AArch64/f16-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/f16-instructions.ll
@@ -84,11 +84,8 @@ define half @test_fmadd(half %a, half %b, half %c) #0 {
; CHECK-CVT-SD: // %bb.0:
; CHECK-CVT-SD-NEXT: fcvt s1, h1
; CHECK-CVT-SD-NEXT: fcvt s0, h0
-; CHECK-CVT-SD-NEXT: fmul s0, s0, s1
-; CHECK-CVT-SD-NEXT: fcvt s1, h2
-; CHECK-CVT-SD-NEXT: fcvt h0, s0
-; CHECK-CVT-SD-NEXT: fcvt s0, h0
-; CHECK-CVT-SD-NEXT: fadd s0, s0, s1
+; CHECK-CVT-SD-NEXT: fcvt s2, h2
+; CHECK-CVT-SD-NEXT: fmadd s0, s0, s1, s2
; CHECK-CVT-SD-NEXT: fcvt h0, s0
; CHECK-CVT-SD-NEXT: ret
;
@@ -1248,6 +1245,15 @@ define half @test_atan(half %a) #0 {
}
define half @test_atan2(half %a, half %b) #0 {
+; CHECK-LABEL: test_atan2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: fcvt s1, h1
+; CHECK-NEXT: bl atan2f
+; CHECK-NEXT: fcvt h0, s0
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
%r = call half @llvm.atan2.f16(half %a, half %b)
ret half %r
}
diff --git a/llvm/test/CodeGen/AArch64/fmla.ll b/llvm/test/CodeGen/AArch64/fmla.ll
index 7bcaae5a77eac..a37aabb0b5384 100644
--- a/llvm/test/CodeGen/AArch64/fmla.ll
+++ b/llvm/test/CodeGen/AArch64/fmla.ll
@@ -1114,11 +1114,8 @@ define half @fmul_f16(half %a, half %b, half %c) {
; CHECK-SD-NOFP16: // %bb.0: // %entry
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h2
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s1
+; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fmadd s0, s0, s1, s2
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
;
diff --git a/llvm/test/CodeGen/AArch64/fp16_fast_math.ll b/llvm/test/CodeGen/AArch64/fp16_fast_math.ll
index b7d2de708a110..7d9654d1ff8c0 100644
--- a/llvm/test/CodeGen/AArch64/fp16_fast_math.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_fast_math.ll
@@ -88,3 +88,112 @@ entry:
%add = fadd ninf half %x, %y
ret half %add
}
+
+; Check that when we have the right fast math flags the converts in between the
+; two fadds are removed.
+
+define half @normal_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: normal_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-CVT-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr1:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY2]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nofpexcept FADDSrr killed [[FCVTSHr1]], killed [[FCVTSHr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr killed [[FADDSrr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr2:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr killed [[FCVTHSr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr3:%[0-9]+]]:fpr32 = nofpexcept FCVTSHr [[COPY]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr1:%[0-9]+]]:fpr32 = nofpexcept FADDSrr killed [[FCVTSHr2]], killed [[FCVTSHr3]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTHSr1:%[0-9]+]]:fpr16 = nofpexcept FCVTHSr killed [[FADDSrr1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: $h0 = COPY [[FCVTHSr1]]
+ ; CHECK-CVT-NEXT: RET_ReallyLR implicit $h0
+ ;
+ ; CHECK-FP16-LABEL: name: normal_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-FP16-NEXT: [[FADDHrr:%[0-9]+]]:fpr16 = nofpexcept FADDHrr [[COPY2]], [[COPY1]], implicit $fpcr
+ ; CHECK-FP16-NEXT: [[FADDHrr1:%[0-9]+]]:fpr16 = nofpexcept FADDHrr killed [[FADDHrr]], [[COPY]], implicit $fpcr
+ ; CHECK-FP16-NEXT: $h0 = COPY [[FADDHrr1]]
+ ; CHECK-FP16-NEXT: RET_ReallyLR implicit $h0
+entry:
+ %add1 = fadd half %x, %y
+ %add2 = fadd half %add1, %z
+ ret half %add2
+}
+
+define half @nnan_ninf_contract_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: nnan_ninf_contract_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-CVT-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr1:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FCVTSHr [[COPY2]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FADDSrr killed [[FCVTSHr1]], killed [[FCVTSHr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr2:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FCVTSHr [[COPY]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr1:%[0-9]+]]:fpr32 = nnan ninf contract nofpexcept FADDSrr killed [[FADDSrr]], killed [[FCVTSHr2]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = nnan ninf contract nofpexcept FCVTHSr killed [[FADDSrr1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: $h0 = COPY [[FCVTHSr]]
+ ; CHECK-CVT-NEXT: RET_ReallyLR implicit $h0
+ ;
+ ; CHECK-FP16-LABEL: name: nnan_ninf_contract_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-FP16-NEXT: [[FADDHrr:%[0-9]+]]:fpr16 = nnan ninf contract nofpexcept FADDHrr [[COPY2]], [[COPY1]], implicit $fpcr
+ ; CHECK-FP16-NEXT: [[FADDHrr1:%[0-9]+]]:fpr16 = nnan ninf contract nofpexcept FADDHrr killed [[FADDHrr]], [[COPY]], implicit $fpcr
+ ; CHECK-FP16-NEXT: $h0 = COPY [[FADDHrr1]]
+ ; CHECK-FP16-NEXT: RET_ReallyLR implicit $h0
+entry:
+ %add1 = fadd nnan ninf contract half %x, %y
+ %add2 = fadd nnan ninf contract half %add1, %z
+ ret half %add2
+}
+
+define half @ninf_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: ninf_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-CVT-NEXT: [[FCVTSHr:%[0-9]+]]:fpr32 = ninf nofpexcept FCVTSHr [[COPY1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr1:%[0-9]+]]:fpr32 = ninf nofpexcept FCVTSHr [[COPY2]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr:%[0-9]+]]:fpr32 = ninf nofpexcept FADDSrr killed [[FCVTSHr1]], killed [[FCVTSHr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTHSr:%[0-9]+]]:fpr16 = ninf nofpexcept FCVTHSr killed [[FADDSrr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr2:%[0-9]+]]:fpr32 = ninf nofpexcept FCVTSHr killed [[FCVTHSr]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTSHr3:%[0-9]+]]:fpr32 = ninf nofpexcept FCVTSHr [[COPY]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FADDSrr1:%[0-9]+]]:fpr32 = ninf nofpexcept FADDSrr killed [[FCVTSHr2]], killed [[FCVTSHr3]], implicit $fpcr
+ ; CHECK-CVT-NEXT: [[FCVTHSr1:%[0-9]+]]:fpr16 = ninf nofpexcept FCVTHSr killed [[FADDSrr1]], implicit $fpcr
+ ; CHECK-CVT-NEXT: $h0 = COPY [[FCVTHSr1]]
+ ; CHECK-CVT-NEXT: RET_ReallyLR implicit $h0
+ ;
+ ; CHECK-FP16-LABEL: name: ninf_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $h0, $h1, $h2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:fpr16 = COPY $h2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:fpr16 = COPY $h0
+ ; CHECK-FP16-NEXT: [[FADDHrr:%[0-9]+]]:fpr16 = ninf nofpexcept FADDHrr [[COPY2]], [[COPY1]], implicit $fpcr
+ ; CHECK-FP16-NEXT: [[FADDHrr1:%[0-9]+]]:fpr16 = ninf nofpexcept FADDHrr killed [[FADDHrr]], [[COPY]], implicit $fpcr
+ ; CHECK-FP16-NEXT: $h0 = COPY [[FADDHrr1]]
+ ; CHECK-FP16-NEXT: RET_ReallyLR implicit $h0
+entry:
+ %add1 = fadd ninf half %x, %y
+ %add2 = fadd ninf half %add1, %z
+ ret half %add2
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
index 4eaaee7ce5055..95ca0a68a7212 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-reduce.ll
@@ -443,23 +443,17 @@ define half @faddv_v4f16(half %start, <4 x half> %a) {
; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
; NONEON-NOSVE-NEXT: str d1, [sp, #8]
; NONEON-NOSVE-NEXT: fcvt s0, h0
-; NONEON-NOSVE-NEXT: ldr h1, [sp, #8]
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #10]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #12]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
+; NONEON-NOSVE-NEXT: ldr h1, [sp, #10]
; NONEON-NOSVE-NEXT: ldr h2, [sp, #14]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
+; NONEON-NOSVE-NEXT: ldr h3, [sp, #12]
+; NONEON-NOSVE-NEXT: ldr h4, [sp, #8]
; NONEON-NOSVE-NEXT: fcvt s1, h1
+; NONEON-NOSVE-NEXT: fcvt s3, h3
+; NONEON-NOSVE-NEXT: fcvt s2, h2
+; NONEON-NOSVE-NEXT: fcvt s4, h4
+; NONEON-NOSVE-NEXT: fadd s2, s3, s2
+; NONEON-NOSVE-NEXT: fadd s1, s4, s1
; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
; NONEON-NOSVE-NEXT: fadd s0, s0, s1
; NONEON-NOSVE-NEXT: fcvt h0, s0
; NONEON-NOSVE-NEXT: add sp, sp, #16
@@ -481,44 +475,30 @@ define half @faddv_v8f16(half %start, <8 x half> %a) {
; NONEON-NOSVE: // %bb.0:
; NONEON-NOSVE-NEXT: str q1, [sp, #-16]!
; NONEON-NOSVE-NEXT: .cfi_def_cfa_offset 16
-; NONEON-NOSVE-NEXT: ldr h1, [sp]
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #2]
-; NONEON-NOSVE-NEXT: fcvt s0, h0
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #4]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
+; NONEON-NOSVE-NEXT: ldr h1, [sp, #2]
; NONEON-NOSVE-NEXT: ldr h2, [sp, #6]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #8]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #10]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
+; NONEON-NOSVE-NEXT: fcvt s0, h0
+; NONEON-NOSVE-NEXT: ldr h3, [sp, #4]
+; NONEON-NOSVE-NEXT: ldr h4, [sp]
+; NONEON-NOSVE-NEXT: ldr h5, [sp, #10]
+; NONEON-NOSVE-NEXT: ldr h6, [sp, #8]
; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #12]
; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
+; NONEON-NOSVE-NEXT: fcvt s4, h4
+; NONEON-NOSVE-NEXT: fcvt s3, h3
+; NONEON-NOSVE-NEXT: fcvt s5, h5
+; NONEON-NOSVE-NEXT: fcvt s6, h6
+; NONEON-NOSVE-NEXT: ldr h7, [sp, #12]
+; NONEON-NOSVE-NEXT: fadd s1, s4, s1
+; NONEON-NOSVE-NEXT: fadd s2, s3, s2
+; NONEON-NOSVE-NEXT: fcvt s3, h7
+; NONEON-NOSVE-NEXT: fadd s4, s6, s5
+; NONEON-NOSVE-NEXT: ldr h5, [sp, #14]
; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: ldr h2, [sp, #14]
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
+; NONEON-NOSVE-NEXT: fadd s2, s4, s3
+; NONEON-NOSVE-NEXT: fcvt s3, h5
; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
+; NONEON-NOSVE-NEXT: fadd s0, s0, s3
; NONEON-NOSVE-NEXT: fadd s0, s0, s1
; NONEON-NOSVE-NEXT: fcvt h0, s0
; NONEON-NOSVE-NEXT: add sp, sp, #16
@@ -547,79 +527,49 @@ define half @faddv_v16f16(half %start, ptr %a) {
; NONEON-NOSVE-NEXT: fcvt s0, h0
; NONEON-NOSVE-NEXT: ldr h3, [sp, #16]
; NONEON-NOSVE-NEXT: ldr h4, [sp]
+; NONEON-NOSVE-NEXT: ldr h5, [sp, #20]
+; NONEON-NOSVE-NEXT: ldr h6, [sp, #4]
; NONEON-NOSVE-NEXT: fcvt s1, h1
; NONEON-NOSVE-NEXT: fcvt s2, h2
+; NONEON-NOSVE-NEXT: ldr h7, [sp, #22]
+; NONEON-NOSVE-NEXT: ldr h16, [sp, #6]
; NONEON-NOSVE-NEXT: fcvt s3, h3
+; NONEON-NOSVE-NEXT: ldr h17, [sp, #24]
+; NONEON-NOSVE-NEXT: ldr h18, [sp, #8]
; NONEON-NOSVE-NEXT: fcvt s4, h4
+; NONEON-NOSVE-NEXT: ldr h19, [sp, #26]
+; NONEON-NOSVE-NEXT: ldr h20, [sp, #10]
+; NONEON-NOSVE-NEXT: fcvt s5, h5
+; NONEON-NOSVE-NEXT: fcvt s6, h6
+; NONEON-NOSVE-NEXT: fcvt s7, h7
+; NONEON-NOSVE-NEXT: fcvt s16, h16
+; NONEON-NOSVE-NEXT: fcvt s17, h17
+; NONEON-NOSVE-NEXT: fcvt s18, h18
+; NONEON-NOSVE-NEXT: fcvt s19, h19
+; NONEON-NOSVE-NEXT: fcvt s20, h20
+; NONEON-NOSVE-NEXT: ldr h21, [sp, #28]
+; NONEON-NOSVE-NEXT: ldr h22, [sp, #12]
; NONEON-NOSVE-NEXT: fadd s1, s2, s1
; NONEON-NOSVE-NEXT: fadd s2, s4, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #20]
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #4]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt s4, h4
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt h2, s2
-; NONEON-NOSVE-NEXT: fadd s3, s4, s3
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #6]
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt s4, h4
+; NONEON-NOSVE-NEXT: fadd s3, s6, s5
+; NONEON-NOSVE-NEXT: fadd s4, s16, s7
+; NONEON-NOSVE-NEXT: fcvt s5, h21
+; NONEON-NOSVE-NEXT: fcvt s6, h22
+; NONEON-NOSVE-NEXT: fadd s7, s18, s17
+; NONEON-NOSVE-NEXT: ldr h17, [sp, #30]
+; NONEON-NOSVE-NEXT: fadd s16, s20, s19
+; NONEON-NOSVE-NEXT: ldr h18, [sp, #14]
; NONEON-NOSVE-NEXT: fadd s1, s2, s1
-; NONEON-NOSVE-NEXT: fcvt h2, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #22]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fadd s3, s4, s3
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #8]
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s4, h4
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h2, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #24]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fadd s3, s4, s3
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #10]
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s4, h4
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h2, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #26]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fadd s3, s4, s3
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #12]
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s4, h4
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h2, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #28]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fadd s2, s4, s3
-; NONEON-NOSVE-NEXT: ldr h3, [sp, #30]
-; NONEON-NOSVE-NEXT: ldr h4, [sp, #14]
-; NONEON-NOSVE-NEXT: fcvt s3, h3
-; NONEON-NOSVE-NEXT: fcvt s4, h4
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt h2, s2
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
+; NONEON-NOSVE-NEXT: fadd s2, s3, s4
+; NONEON-NOSVE-NEXT: fcvt s4, h17
+; NONEON-NOSVE-NEXT: fadd s5, s6, s5
+; NONEON-NOSVE-NEXT: fcvt s6, h18
+; NONEON-NOSVE-NEXT: fadd s3, s7, s16
; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fadd s2, s4, s3
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt h2, s2
-; NONEON-NOSVE-NEXT: fcvt s1, h1
-; NONEON-NOSVE-NEXT: fcvt s2, h2
+; NONEON-NOSVE-NEXT: fadd s2, s3, s5
+; NONEON-NOSVE-NEXT: fadd s3, s6, s4
; NONEON-NOSVE-NEXT: fadd s1, s1, s2
-; NONEON-NOSVE-NEXT: fcvt h1, s1
-; NONEON-NOSVE-NEXT: fcvt s1, h1
+; NONEON-NOSVE-NEXT: fadd s0, s0, s3
; NONEON-NOSVE-NEXT: fadd s0, s0, s1
; NONEON-NOSVE-NEXT: fcvt h0, s0
; NONEON-NOSVE-NEXT: add sp, sp, #32
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
index 03db1d0d433d3..11ce20f109623 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd.ll
@@ -18,19 +18,15 @@ define half @add_HalfH(<4 x half> %bin.rdx) {
; CHECK-SD-NOFP16: // %bb.0:
; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h0
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[2]
-; CHECK-SD-NOFP16-NEXT: mov h0, v0.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h3, v0.h[2]
; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s0, s1, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s1
+; CHECK-SD-NOFP16-NEXT: fadd s1, s3, s2
+; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
;
@@ -61,39 +57,27 @@ define half @add_HalfH(<4 x half> %bin.rdx) {
define half @add_H(<8 x half> %bin.rdx) {
; CHECK-SD-NOFP16-LABEL: add_H:
; CHECK-SD-NOFP16: // %bb.0:
-; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h0
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[2]
+; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-SD-NOFP16-NEXT: mov h3, v0.h[5]
+; CHECK-SD-NOFP16-NEXT: mov h4, v0.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v0.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h6, v0.h[6]
+; CHECK-SD-NOFP16-NEXT: fcvt s7, h0
; CHECK-SD-NOFP16-NEXT: mov h0, v0.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fadd s2, s7, s2
+; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s5
+; CHECK-SD-NOFP16-NEXT: fadd s3, s4, s3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h6
+; CHECK-SD-NOFP16-NEXT: fadd s1, s2, s1
+; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
; CHECK-SD-NOFP16-NEXT: fadd s0, s1, s0
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
@@ -156,38 +140,26 @@ define half @add_2H(<16 x half> %bin.rdx) {
; CHECK-SD-NOFP16-NEXT: fcvtn v1.4h, v2.4s
; CHECK-SD-NOFP16-NEXT: fcvtn2 v1.8h, v0.4s
; CHECK-SD-NOFP16-NEXT: mov h0, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s2, s0
; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v1.h[5]
+; CHECK-SD-NOFP16-NEXT: fcvt s6, h1
+; CHECK-SD-NOFP16-NEXT: mov h7, v1.h[6]
; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fadd s0, s6, s0
+; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
+; CHECK-SD-NOFP16-NEXT: fadd s3, s4, s5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h7
+; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
+; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
@@ -311,21 +283,15 @@ define half @fadd_reduction_v4f16_in_loop(ptr %ptr.start) {
; CHECK-SD-NOFP16-NEXT: add x8, x8, #8
; CHECK-SD-NOFP16-NEXT: cmp w8, #56
; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s2
-; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[2]
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
+; CHECK-SD-NOFP16-NEXT: fadd s2, s4, s3
+; CHECK-SD-NOFP16-NEXT: fadd s1, s1, s2
; CHECK-SD-NOFP16-NEXT: fadd s0, s1, s0
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: b.ne .LBB10_1
@@ -419,43 +385,29 @@ define half @fadd_reduction_v8f16_in_loop(ptr %ptr.start) {
; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: add x8, x8, #8
; CHECK-SD-NOFP16-NEXT: cmp w8, #56
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s2
+; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[3]
; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[1]
+; CHECK-SD-NOFP16-NEXT: mov h5, v1.h[5]
+; CHECK-SD-NOFP16-NEXT: mov h6, v1.h[4]
+; CHECK-SD-NOFP16-NEXT: fcvt s7, h1
+; CHECK-SD-NOFP16-NEXT: mov h16, v1.h[6]
; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
-; CHECK-SD-NOFP16-NEXT: fcvt h2, s2
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fadd s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
+; CHECK-SD-NOFP16-NEXT: fcvt s6, h6
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
+; CHECK-SD-NOFP16-NEXT: fadd s4, s7, s4
+; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s2
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h16
+; CHECK-SD-NOFP16-NEXT: fadd s5, s6, s5
; CHECK-SD-NOFP16-NEXT: fadd s0, s1, s0
+; CHECK-SD-NOFP16-NEXT: fadd s2, s4, s2
+; CHECK-SD-NOFP16-NEXT: fadd s3, s5, s3
+; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
+; CHECK-SD-NOFP16-NEXT: fadd s0, s2, s0
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: b.ne .LBB11_1
; CHECK-SD-NOFP16-NEXT: // %bb.2: // %exit
@@ -552,38 +504,26 @@ define half @fadd_reduct_reassoc_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-SD-NOFP16-NEXT: fcvtn v1.4h, v2.4s
; CHECK-SD-NOFP16-NEXT: fcvtn2 v1.8h, v0.4s
; CHECK-SD-NOFP16-NEXT: mov h0, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s2, s0
; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v1.h[5]
+; CHECK-SD-NOFP16-NEXT: fcvt s6, h1
+; CHECK-SD-NOFP16-NEXT: mov h7, v1.h[6]
; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fadd s0, s6, s0
+; CHECK-SD-NOFP16-NEXT: fadd s2, s2, s3
+; CHECK-SD-NOFP16-NEXT: fadd s3, s4, s5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h7
+; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
+; CHECK-SD-NOFP16-NEXT: fadd s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fadd s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
index da75a80595212..2429cf4b4597a 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul.ll
@@ -26,19 +26,15 @@ define half @mul_HalfH(<4 x half> %bin.rdx) {
; CHECK-SD-NOFP16: // %bb.0:
; CHECK-SD-NOFP16-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h0
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[2]
-; CHECK-SD-NOFP16-NEXT: mov h0, v0.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h3, v0.h[2]
; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s0, s1, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s1
+; CHECK-SD-NOFP16-NEXT: fmul s1, s3, s2
+; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
;
@@ -78,39 +74,27 @@ define half @mul_HalfH(<4 x half> %bin.rdx) {
define half @mul_H(<8 x half> %bin.rdx) {
; CHECK-SD-NOFP16-LABEL: mul_H:
; CHECK-SD-NOFP16: // %bb.0:
-; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h0
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s2, s1
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h1, v0.h[2]
+; CHECK-SD-NOFP16-NEXT: mov h2, v0.h[1]
+; CHECK-SD-NOFP16-NEXT: mov h3, v0.h[5]
+; CHECK-SD-NOFP16-NEXT: mov h4, v0.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v0.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h6, v0.h[6]
+; CHECK-SD-NOFP16-NEXT: fcvt s7, h0
; CHECK-SD-NOFP16-NEXT: mov h0, v0.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fmul s2, s7, s2
+; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s5
+; CHECK-SD-NOFP16-NEXT: fmul s3, s4, s3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h6
+; CHECK-SD-NOFP16-NEXT: fmul s1, s2, s1
+; CHECK-SD-NOFP16-NEXT: fmul s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fmul s1, s1, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h1, s1
-; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
; CHECK-SD-NOFP16-NEXT: fmul s0, s1, s0
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
@@ -191,38 +175,26 @@ define half @mul_2H(<16 x half> %bin.rdx) {
; CHECK-SD-NOFP16-NEXT: fcvtn v1.4h, v2.4s
; CHECK-SD-NOFP16-NEXT: fcvtn2 v1.8h, v0.4s
; CHECK-SD-NOFP16-NEXT: mov h0, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s2, s0
; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v1.h[5]
+; CHECK-SD-NOFP16-NEXT: fcvt s6, h1
+; CHECK-SD-NOFP16-NEXT: mov h7, v1.h[6]
; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fmul s0, s6, s0
+; CHECK-SD-NOFP16-NEXT: fmul s2, s2, s3
+; CHECK-SD-NOFP16-NEXT: fmul s3, s4, s5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h7
+; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
+; CHECK-SD-NOFP16-NEXT: fmul s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
@@ -339,38 +311,26 @@ define half @fmul_reduct_reassoc_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-SD-NOFP16-NEXT: fcvtn v1.4h, v2.4s
; CHECK-SD-NOFP16-NEXT: fcvtn2 v1.8h, v0.4s
; CHECK-SD-NOFP16-NEXT: mov h0, v1.h[1]
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s2, s0
; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[2]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[3]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[4]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[5]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
-; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: mov h2, v1.h[6]
+; CHECK-SD-NOFP16-NEXT: mov h3, v1.h[3]
+; CHECK-SD-NOFP16-NEXT: mov h4, v1.h[4]
+; CHECK-SD-NOFP16-NEXT: mov h5, v1.h[5]
+; CHECK-SD-NOFP16-NEXT: fcvt s6, h1
+; CHECK-SD-NOFP16-NEXT: mov h7, v1.h[6]
; CHECK-SD-NOFP16-NEXT: mov h1, v1.h[7]
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
+; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fcvt s2, h2
+; CHECK-SD-NOFP16-NEXT: fcvt s3, h3
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h4
+; CHECK-SD-NOFP16-NEXT: fcvt s5, h5
; CHECK-SD-NOFP16-NEXT: fcvt s1, h1
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
+; CHECK-SD-NOFP16-NEXT: fmul s0, s6, s0
+; CHECK-SD-NOFP16-NEXT: fmul s2, s2, s3
+; CHECK-SD-NOFP16-NEXT: fmul s3, s4, s5
+; CHECK-SD-NOFP16-NEXT: fcvt s4, h7
+; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
+; CHECK-SD-NOFP16-NEXT: fmul s2, s3, s4
; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s2
-; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
-; CHECK-SD-NOFP16-NEXT: fcvt s0, h0
; CHECK-SD-NOFP16-NEXT: fmul s0, s0, s1
; CHECK-SD-NOFP16-NEXT: fcvt h0, s0
; CHECK-SD-NOFP16-NEXT: ret
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index fdccacf372dfa..978f223aafb94 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -6243,8 +6243,6 @@ define half @v_exp_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8a000, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp_f16_fast:
@@ -6746,10 +6744,6 @@ define <2 x half> @v_exp_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3fb8a000, v1
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp_v2f16_fast:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index 0c2e6f82c9115..70c3787bac9a1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -6336,8 +6336,6 @@ define half @v_exp10_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3fb8a000, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp10_f16_fast:
@@ -6840,10 +6838,6 @@ define <2 x half> @v_exp10_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3fb8a000, v1
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp10_v2f16_fast:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
index c34113a5dfab0..15bcab9f774e4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -3247,8 +3247,6 @@ define half @v_exp2_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp2_f16_fast:
@@ -3620,10 +3618,6 @@ define <2 x half> @v_exp2_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_exp2_v2f16_fast:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index ff8b539fd5ebb..bee47def5692b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -6577,8 +6577,6 @@ define half @v_log_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log_f16_fast:
@@ -7101,10 +7099,6 @@ define <2 x half> @v_log_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3f317218, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log_v2f16_fast:
@@ -7289,12 +7283,6 @@ define <3 x half> @v_log_v3f16_fast(<3 x half> %in) {
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3f317218, v1
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3f317218, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log_v3f16_fast:
@@ -7542,28 +7530,20 @@ define <4 x half> @v_log_v4f16_fast(<4 x half> %in) {
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3f317218, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3f317218, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v3, 0x3f317218, v3
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3f317218, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-SDAG-NEXT: v_mul_f32_e32 v3, 0x3f317218, v3
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log_v4f16_fast:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 4f783589f148f..57c12963639c1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -6577,8 +6577,6 @@ define half @v_log10_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log10_f16_fast:
@@ -7101,10 +7099,6 @@ define <2 x half> @v_log10_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log10_v2f16_fast:
@@ -7289,12 +7283,6 @@ define <3 x half> @v_log10_v3f16_fast(<3 x half> %in) {
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v1
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3e9a209b, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log10_v3f16_fast:
@@ -7542,28 +7530,20 @@ define <4 x half> @v_log10_v4f16_fast(<4 x half> %in) {
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_mul_f32_e32 v0, 0x3e9a209b, v0
; SI-SDAG-NEXT: v_mul_f32_e32 v1, 0x3e9a209b, v1
-; SI-SDAG-NEXT: v_mul_f32_e32 v3, 0x3e9a209b, v3
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3e9a209b, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-SDAG-NEXT: v_mul_f32_e32 v3, 0x3e9a209b, v3
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log10_v4f16_fast:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
index a98baa2fdb35c..a2792ccb43d25 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -4017,8 +4017,6 @@ define half @v_log2_f16_fast(half %in) {
; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log2_f16_fast:
@@ -4471,10 +4469,6 @@ define <2 x half> @v_log2_v2f16_fast(<2 x half> %in) {
; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log2_v2f16_fast:
@@ -4653,12 +4647,6 @@ define <3 x half> @v_log2_v3f16_fast(<3 x half> %in) {
; SI-SDAG-NEXT: v_log_f32_e32 v0, v0
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
; SI-SDAG-NEXT: v_log_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log2_v3f16_fast:
@@ -4882,14 +4870,6 @@ define <4 x half> @v_log2_v4f16_fast(<4 x half> %in) {
; SI-SDAG-NEXT: v_log_f32_e32 v1, v1
; SI-SDAG-NEXT: v_log_f32_e32 v2, v2
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SDAG-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; SI-GISEL-LABEL: v_log2_v4f16_fast:
diff --git a/llvm/test/CodeGen/ARM/fp16_fast_math.ll b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
index b440bb97674b4..4c2e3ce4efcd5 100644
--- a/llvm/test/CodeGen/ARM/fp16_fast_math.ll
+++ b/llvm/test/CodeGen/ARM/fp16_fast_math.ll
@@ -4,8 +4,6 @@
; Check that the output instructions have the same fast math flags as the input
; fadd, even when f16 is legalized to f32.
-; FIXME: We don't get fast math flags on VCVTBHS because they get lost during a
-; DAGCombine transformation.
; FIXME: We don't get fast math flags on VCVTBSH because the outermost node in
; the isel pattern is COPY_TO_REGCLASS and the fast math flags end up there.
@@ -52,8 +50,8 @@ define half @fast_fadd(half %x, half %y) {
; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r0
; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:spr = COPY [[COPY1]]
; CHECK-CVT-NEXT: [[COPY3:%[0-9]+]]:spr = COPY [[COPY]]
- ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
- ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
; CHECK-CVT-NEXT: [[VADDS:%[0-9]+]]:spr = nnan ninf nsz arcp contract afn reassoc VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
; CHECK-CVT-NEXT: [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
; CHECK-CVT-NEXT: [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
@@ -86,8 +84,8 @@ define half @ninf_fadd(half %x, half %y) {
; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r0
; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:spr = COPY [[COPY1]]
; CHECK-CVT-NEXT: [[COPY3:%[0-9]+]]:spr = COPY [[COPY]]
- ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
- ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY2]], 14 /* CC::al */, $noreg
; CHECK-CVT-NEXT: [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
; CHECK-CVT-NEXT: [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
; CHECK-CVT-NEXT: [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
@@ -110,3 +108,142 @@ entry:
%add = fadd ninf half %x, %y
ret half %add
}
+
+; Check that when we have the right fast math flags the converts in between the
+; two fadds are removed.
+
+define half @normal_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: normal_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK-CVT-NEXT: [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
+ ; CHECK-CVT-NEXT: [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS:%[0-9]+]]:spr = VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS2:%[0-9]+]]:spr = VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
+ ; CHECK-CVT-NEXT: [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+ ; CHECK-CVT-NEXT: [[COPY7:%[0-9]+]]:spr = COPY [[COPY6]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS3:%[0-9]+]]:spr = VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS1:%[0-9]+]]:spr = VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF
+ ; CHECK-CVT-NEXT: [[VCVTBSH1:%[0-9]+]]:spr = VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY8:%[0-9]+]]:gpr = COPY [[VCVTBSH1]]
+ ; CHECK-CVT-NEXT: $r0 = COPY [[COPY8]]
+ ; CHECK-CVT-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+ ;
+ ; CHECK-FP16-LABEL: name: normal_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:rgpr = COPY $r2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:rgpr = COPY $r1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
+ ; CHECK-FP16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH:%[0-9]+]]:hpr = VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH1:%[0-9]+]]:hpr = VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+ ; CHECK-FP16-NEXT: $r0 = COPY [[VADDH1]]
+ ; CHECK-FP16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+entry:
+ %add1 = fadd half %x, %y
+ %add2 = fadd half %add1, %z
+ ret half %add2
+}
+
+define half @nnan_ninf_contract_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: nnan_ninf_contract_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK-CVT-NEXT: [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
+ ; CHECK-CVT-NEXT: [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS2:%[0-9]+]]:spr = nnan ninf contract VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS1:%[0-9]+]]:spr = nnan ninf contract VADDS killed [[VADDS]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
+ ; CHECK-CVT-NEXT: [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+ ; CHECK-CVT-NEXT: $r0 = COPY [[COPY6]]
+ ; CHECK-CVT-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+ ;
+ ; CHECK-FP16-LABEL: name: nnan_ninf_contract_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:rgpr = COPY $r2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:rgpr = COPY $r1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
+ ; CHECK-FP16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH:%[0-9]+]]:hpr = nnan ninf contract VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH1:%[0-9]+]]:hpr = nnan ninf contract VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+ ; CHECK-FP16-NEXT: $r0 = COPY [[VADDH1]]
+ ; CHECK-FP16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+entry:
+ %add1 = fadd nnan ninf contract half %x, %y
+ %add2 = fadd nnan ninf contract half %add1, %z
+ ret half %add2
+}
+
+define half @ninf_fadd_sequence(half %x, half %y, half %z) {
+ ; CHECK-CVT-LABEL: name: ninf_fadd_sequence
+ ; CHECK-CVT: bb.0.entry:
+ ; CHECK-CVT-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-CVT-NEXT: {{ $}}
+ ; CHECK-CVT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $r2
+ ; CHECK-CVT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $r1
+ ; CHECK-CVT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $r0
+ ; CHECK-CVT-NEXT: [[COPY3:%[0-9]+]]:spr = COPY [[COPY2]]
+ ; CHECK-CVT-NEXT: [[COPY4:%[0-9]+]]:spr = COPY [[COPY1]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY4]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VCVTBHS1:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY3]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS1]], killed [[VCVTBHS]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY5:%[0-9]+]]:spr = COPY [[COPY]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS2:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY5]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[DEF:%[0-9]+]]:spr = IMPLICIT_DEF
+ ; CHECK-CVT-NEXT: [[VCVTBSH:%[0-9]+]]:spr = VCVTBSH [[DEF]], killed [[VADDS]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY6:%[0-9]+]]:gpr = COPY [[VCVTBSH]]
+ ; CHECK-CVT-NEXT: [[COPY7:%[0-9]+]]:spr = COPY [[COPY6]]
+ ; CHECK-CVT-NEXT: [[VCVTBHS3:%[0-9]+]]:spr = ninf VCVTBHS killed [[COPY7]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[VADDS1:%[0-9]+]]:spr = ninf VADDS killed [[VCVTBHS3]], killed [[VCVTBHS2]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[DEF1:%[0-9]+]]:spr = IMPLICIT_DEF
+ ; CHECK-CVT-NEXT: [[VCVTBSH1:%[0-9]+]]:spr = VCVTBSH [[DEF1]], killed [[VADDS1]], 14 /* CC::al */, $noreg
+ ; CHECK-CVT-NEXT: [[COPY8:%[0-9]+]]:gpr = COPY [[VCVTBSH1]]
+ ; CHECK-CVT-NEXT: $r0 = COPY [[COPY8]]
+ ; CHECK-CVT-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+ ;
+ ; CHECK-FP16-LABEL: name: ninf_fadd_sequence
+ ; CHECK-FP16: bb.0.entry:
+ ; CHECK-FP16-NEXT: liveins: $r0, $r1, $r2
+ ; CHECK-FP16-NEXT: {{ $}}
+ ; CHECK-FP16-NEXT: [[COPY:%[0-9]+]]:rgpr = COPY $r2
+ ; CHECK-FP16-NEXT: [[COPY1:%[0-9]+]]:rgpr = COPY $r1
+ ; CHECK-FP16-NEXT: [[COPY2:%[0-9]+]]:rgpr = COPY $r0
+ ; CHECK-FP16-NEXT: [[VMOVHR:%[0-9]+]]:hpr = VMOVHR [[COPY1]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR1:%[0-9]+]]:hpr = VMOVHR [[COPY2]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH:%[0-9]+]]:hpr = ninf VADDH killed [[VMOVHR1]], killed [[VMOVHR]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VMOVHR2:%[0-9]+]]:hpr = VMOVHR [[COPY]], 14, $noreg
+ ; CHECK-FP16-NEXT: [[VADDH1:%[0-9]+]]:hpr = ninf VADDH killed [[VADDH]], killed [[VMOVHR2]], 14, $noreg
+ ; CHECK-FP16-NEXT: $r0 = COPY [[VADDH1]]
+ ; CHECK-FP16-NEXT: MOVPCLR 14 /* CC::al */, $noreg, implicit $r0
+entry:
+ %add1 = fadd ninf half %x, %y
+ %add2 = fadd ninf half %add1, %z
+ ret half %add2
+}
diff --git a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
index 786e35517fd7c..c93ddca949234 100644
--- a/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
+++ b/llvm/test/CodeGen/Thumb2/bf16-instructions.ll
@@ -116,28 +116,19 @@ define bfloat @test_fmadd(bfloat %a, bfloat %b, bfloat %c) {
; CHECK-FP: @ %bb.0:
; CHECK-FP-NEXT: .save {r7, lr}
; CHECK-FP-NEXT: push {r7, lr}
-; CHECK-FP-NEXT: .vsave {d8}
-; CHECK-FP-NEXT: vpush {d8}
-; CHECK-FP-NEXT: vmov r0, s1
-; CHECK-FP-NEXT: vmov r1, s0
-; CHECK-FP-NEXT: vmov.f32 s16, s2
-; CHECK-FP-NEXT: lsls r0, r0, #16
-; CHECK-FP-NEXT: vmov s0, r0
-; CHECK-FP-NEXT: lsls r0, r1, #16
-; CHECK-FP-NEXT: vmov s2, r0
-; CHECK-FP-NEXT: vmul.f32 s0, s2, s0
-; CHECK-FP-NEXT: bl __truncsfbf2
-; CHECK-FP-NEXT: vmov r0, s16
-; CHECK-FP-NEXT: vmov r1, s0
+; CHECK-FP-NEXT: vmov r0, s0
+; CHECK-FP-NEXT: vmov r1, s1
+; CHECK-FP-NEXT: vmov r2, s2
; CHECK-FP-NEXT: lsls r0, r0, #16
+; CHECK-FP-NEXT: lsls r1, r1, #16
+; CHECK-FP-NEXT: vmov s4, r0
+; CHECK-FP-NEXT: lsls r0, r2, #16
+; CHECK-FP-NEXT: vmov s2, r1
; CHECK-FP-NEXT: vmov s0, r0
-; CHECK-FP-NEXT: lsls r0, r1, #16
-; CHECK-FP-NEXT: vmov s2, r0
-; CHECK-FP-NEXT: vadd.f32 s0, s2, s0
+; CHECK-FP-NEXT: vfma.f32 s0, s4, s2
; CHECK-FP-NEXT: bl __truncsfbf2
; CHECK-FP-NEXT: vmov.f16 r0, s0
; CHECK-FP-NEXT: vmov s0, r0
-; CHECK-FP-NEXT: vpop {d8}
; CHECK-FP-NEXT: pop {r7, pc}
%mul = fmul fast bfloat %a, %b
%r = fadd fast bfloat %mul, %c
>From b7d56384bd7bc4b5613c1f6abcb35e09558564eb Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn at arm.com>
Date: Mon, 24 Mar 2025 17:56:50 +0000
Subject: [PATCH 2/2] Adjust names of flags variables
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c16e2f0bc865b..7708b6a42f656 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18477,14 +18477,14 @@ static SDValue eliminateFPCastPair(SDNode *N) {
}
if (N0.getOpcode() == NarrowingOp && N0.getOperand(0).getValueType() == VT) {
- const SDNodeFlags SrcFlags = N0->getFlags();
- const SDNodeFlags DstFlags = N->getFlags();
+ const SDNodeFlags NarrowFlags = N0->getFlags();
+ const SDNodeFlags WidenFlags = N->getFlags();
// Narrowing can introduce inf and change the encoding of a nan, so the
- // destination must have the nnan and ninf flags to indicate that we don't
- // need to care about that. We are also removing a rounding step, and that
- // requires both the source and destination to allow contraction.
- if (DstFlags.hasNoNaNs() && DstFlags.hasNoInfs() &&
- SrcFlags.hasAllowContract() && DstFlags.hasAllowContract()) {
+ // widen must have the nnan and ninf flags to indicate that we don't need to
+ // care about that. We are also removing a rounding step, and that requires
+ // both the narrow and widen to allow contraction.
+ if (WidenFlags.hasNoNaNs() && WidenFlags.hasNoInfs() &&
+ NarrowFlags.hasAllowContract() && WidenFlags.hasAllowContract()) {
return N0.getOperand(0);
}
}
More information about the llvm-commits
mailing list