[llvm] [RISCV][VLOPT] Add support for checkUsers when UserMI is a Single-Width Integer Reduction (PR #120345)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 23 13:48:25 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/120345
>From bcfe630f4301348228915a193d8af240a67e574d Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 17:50:34 -0800
Subject: [PATCH 1/8] [RISCV][VLOPT] Add support for checkUsers when UserMI is
a reduction
Reductions are unusual: some of their operands are vector registers, but
only element 0 is read. For these operands we do not need to check that
the EEW and EMUL ratios match. However, when the reduction instruction
has a non-zero VL operand, we must be careful not to set CommonVL=0.
Since this is an edge case, we simply do not optimize anything when it
occurs.
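A minimal sketch of the edge case, mirroring the vred_other_user_is_vl0
test added below (VL values are illustrative):

  %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
  %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
  %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0

Here %x feeds the reduction only through vs1, which reads just element 0,
while %z uses %x with VL=0; naively propagating a common VL of 0 to %x
would clobber the element the reduction still reads.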
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 42 +++++++++++++++++--
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 19 +++++++++
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 15 ++++++-
3 files changed, 70 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 4e3212c70ee9b0..8e8b2d0c7d1010 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -616,6 +616,23 @@ static OperandInfo getOperandInfo(const MachineOperand &MO,
return OperandInfo(MIVLMul, MILog2SEW);
}
+ // Vector Reduction Operations
+ // Vector Single-Width Integer Reduction Instructions
+ // The Dest and VS1 only read element 0 of the vector register. Return unknown
+ // for these. VS2 has EEW=SEW and EMUL=LMUL.
+ case RISCV::VREDAND_VS:
+ case RISCV::VREDMAX_VS:
+ case RISCV::VREDMAXU_VS:
+ case RISCV::VREDMIN_VS:
+ case RISCV::VREDMINU_VS:
+ case RISCV::VREDOR_VS:
+ case RISCV::VREDSUM_VS:
+ case RISCV::VREDXOR_VS: {
+ if (MO.getOperandNo() == 2)
+ return OperandInfo(MIVLMul, MILog2SEW);
+ return {};
+ }
+
default:
return {};
}
@@ -938,11 +955,28 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
- // does not impact the decision on whether to optimize VL.
- // TODO: Treat it like a scalar register instead of bailing out.
+ // does not impact the decision on whether to optimize VL. But if there is
+ // another user of MI and it has VL=0, we need to be sure not to reduce the
+ // VL of MI to zero when the VLOp of UserOp may be non-zero.
if (isVectorOpUsedAsScalarOp(UserOp)) {
- CanReduceVL = false;
- break;
+ [[maybe_unused]] Register R = UserOp.getReg();
+ [[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
+ assert(RISCV::VRRegClass.hasSubClassEq(RC) &&
+ "Expect LMUL 1 register class for vector as scalar operands!");
+ LLVM_DEBUG(dbgs() << " Used this operand as a scalar operand\n");
+ const MCInstrDesc &Desc = UserMI.getDesc();
+ unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
+ const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
+ if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
+ (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (!CommonVL) {
+ CommonVL = &VLOp;
+ continue;
+ } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ CanReduceVL = false;
+ break;
+ }
+ }
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index b304769b27731f..75abf775b206a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1064,3 +1064,22 @@ body: |
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0
...
+name: vred_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 3f966b036589fd..d5d0db25129460 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -38,7 +38,7 @@ name: use_largest_common_vl_imm_imm
body: |
bb.0:
; CHECK-LABEL: name: use_largest_common_vl_imm_imm
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
@@ -110,4 +110,15 @@ body: |
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 3 /* e8 */, 0
...
-
+---
+name: vred_other_user_is_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_other_user_is_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
>From fc8ccd0c7d7c09be8457b57fe5b13a1c4507550a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:09:51 -0800
Subject: [PATCH 2/8] fixup! add more incompat tests
---
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 75abf775b206a4..fa64e2744e26c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1083,3 +1083,34 @@ body: |
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
+---
+name: vred_vs1_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1_vs2_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vred_vs1_vs2_incomaptible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+
>From 4dcd8caea27dd0b5e7945ed9f6dae252c5d0c864 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:17:35 -0800
Subject: [PATCH 3/8] fixup! do not else after continue/break
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 8e8b2d0c7d1010..66b97eab814e4f 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -972,10 +972,12 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
if (!CommonVL) {
CommonVL = &VLOp;
continue;
- } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ }
+ if (!CommonVL->isIdenticalTo(VLOp)) {
CanReduceVL = false;
break;
}
+ continue;
}
}
>From 9f888596498d4105094759101ff3064c8eb5e6e9 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:21:05 -0800
Subject: [PATCH 4/8] fixup! rerun test checks
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index fa64e2744e26c3..b605a76bcf5b4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1088,7 +1088,7 @@ name: vred_vs1_vs2
body: |
bb.0:
; CHECK-LABEL: name: vred_vs1_vs2
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
>From 426f4d6aa430da9ea1b05d4e16d8bb4a54858edf Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:29:33 -0800
Subject: [PATCH 5/8] fixup! move continue
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 66b97eab814e4f..b4dffc4e3c61ac 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -977,8 +977,8 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
CanReduceVL = false;
break;
}
- continue;
}
+ continue;
}
if (mayReadPastVL(UserMI)) {
>From 644ff874af73067f3ac574db56a08021ca994899 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:30:34 -0800
Subject: [PATCH 6/8] fixup! VL operand is never x0
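(A short note on why the x0 check is removable, based on the tests in
this PR: the pseudos encode a VLMAX AVL as the immediate -1 rather than
as a read of $x0, e.g.

  %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0

so a register VL operand can be assumed not to be X0.)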
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index b4dffc4e3c61ac..f85af00a168af2 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -967,8 +967,7 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
const MCInstrDesc &Desc = UserMI.getDesc();
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
- if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
- (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
if (!CommonVL) {
CommonVL = &VLOp;
continue;
>From e0e2c23e741b9a6aa5024215f1b2a2e5e53175b4 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:35:27 -0800
Subject: [PATCH 7/8] fixup! fix typo
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index b605a76bcf5b4e..9e18362a41a51c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1104,10 +1104,10 @@ body: |
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
...
---
-name: vred_vs1_vs2_incomaptible_emul
+name: vred_vs1_vs2_incompatible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
>From a2e2938a853859bfdca79346122495439a9e369a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 13:45:26 -0800
Subject: [PATCH 8/8] fixup! be less conservative
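The scalar source of a reduction only reads element 0, so such a user
demands at most VL=1 from the producer. Instead of propagating the user's
full VL (or bailing out when users disagree), point CommonVL at a static
immediate-1 operand. A sketch of the effect in MIR (hypothetical VLs;
grounded in the diff below):

  %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
  %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 4, 3 /* e8 */, 0

Previously %x would have been kept at the user's VL of 4; with this
change it can be reduced to VL=1, since only element 0 of vs1 is read.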
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 18 ++++----
llvm/test/CodeGen/RISCV/double_reduct.ll | 30 +++++++-------
.../rvv/fixed-vectors-reduction-formation.ll | 41 +++++++------------
.../CodeGen/RISCV/rvv/fold-binary-reduce.ll | 6 +--
4 files changed, 39 insertions(+), 56 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index f85af00a168af2..6a5a93b8ea24cc 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -942,6 +942,8 @@ bool RISCVVLOptimizer::isCandidate(const MachineInstr &MI) const {
return true;
}
+static MachineOperand One = MachineOperand::CreateImm(1);
+
bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
MachineInstr &MI) {
// FIXME: Avoid visiting each user for each time we visit something on the
@@ -956,8 +958,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
// does not impact the decision on whether to optimize VL. But if there is
- // another user of MI and it has VL=0, we need to be sure not to reduce the
- // VL of MI to zero when the VLOp of UserOp may be non-zero.
+ // another user of MI and it may have VL=0, we need to be sure not to reduce
+ // the VL of MI to zero when the VLOp of UserOp may be non-zero. The most
+ // we can reduce it to is one.
if (isVectorOpUsedAsScalarOp(UserOp)) {
[[maybe_unused]] Register R = UserOp.getReg();
[[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
@@ -968,16 +971,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
- if (!CommonVL) {
- CommonVL = &VLOp;
- continue;
- }
- if (!CommonVL->isIdenticalTo(VLOp)) {
- CanReduceVL = false;
- break;
- }
+ CommonVL = &One;
+ continue;
}
- continue;
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index cecdd77a079e42..25228b21ef0554 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -25,14 +25,14 @@ define float @fmul_f32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vrgather.vi v10, v9, 1
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vslidedown.vi v8, v9, 2
+; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fmul.s fa0, fa5, fa4
; CHECK-NEXT: ret
%r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
@@ -130,14 +130,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vslidedown.vi v10, v9, 2
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vrgather.vi v10, v9, 1
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vslidedown.vi v8, v9, 2
+; RV32-NEXT: vmul.vv v8, v9, v8
+; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: ret
;
@@ -146,14 +146,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vslidedown.vi v10, v9, 2
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vrgather.vi v10, v9, 1
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vslidedown.vi v8, v9, 2
+; RV64-NEXT: vmul.vv v8, v9, v8
+; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vmul.vv v8, v8, v9
+; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: ret
%r1 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %a)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 4f0f5dd78c94b6..6f52bee591d30d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -437,8 +437,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a0, a3
@@ -452,8 +452,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a2, v9
+; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: add a0, a0, a3
@@ -799,11 +799,8 @@ define float @reduce_fadd_16xi32_prefix5(ptr %p) {
define float @reduce_fadd_2xf32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -835,11 +832,8 @@ define float @reduce_fadd_2xf32_reassoc_only(ptr %p) {
define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_ninf_only:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -854,15 +848,13 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
define float @reduce_fadd_4xi32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: flw fa5, 12(a0)
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
-; CHECK-NEXT: vfredusum.vs v9, v8, v9
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfredusum.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.s fa0, fa4, fa5
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %p, align 256
@@ -881,15 +873,10 @@ define float @reduce_fadd_4xi32_non_associative(ptr %p) {
define float @reduce_fadd_4xi32_non_associative2(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v9
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa3, v9
-; CHECK-NEXT: vfmv.f.s fa2, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
+; CHECK-NEXT: flw fa3, 8(a0)
+; CHECK-NEXT: flw fa2, 12(a0)
; CHECK-NEXT: fadd.s fa5, fa5, fa4
; CHECK-NEXT: fadd.s fa4, fa3, fa2
; CHECK-NEXT: fadd.s fa0, fa5, fa4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 2fda344690bfc6..5bc1ab9820d6cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -282,10 +282,10 @@ define float @reduce_fadd4(float %x, float %y, <4 x float> %v, <4 x float> %w) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v10
-; CHECK-NEXT: vfmv.s.f v10, fa1
-; CHECK-NEXT: vfredusum.vs v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfmv.s.f v8, fa1
+; CHECK-NEXT: vfredusum.vs v8, v9, v8
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fdiv.s fa0, fa5, fa4
; CHECK-NEXT: ret
entry: