[llvm] [RISCV][VLOPT] Add support for checkUsers when UserMI is a Single-Width Integer Reduction (PR #120345)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 23 13:45:50 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/120345
>From 776db58658092ff7ef7d1f20d1eaf496b3ea9db7 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 17:50:34 -0800
Subject: [PATCH 1/8] [RISCV][VLOPT] Add support for checkUsers when UserMI is
a reduction
Reductions are unusual: some of their operands are vector registers of which
only element 0 is read. For those operands, we do not need to check that the
EEW and EMUL ratios match. However, when the reduction instruction has a
non-zero VL operand, we must make sure not to set CommonVL=0. Since this is an
edge case, we simply do not optimize when it occurs.
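For intuition, here is a scalar model of what a single-width reduction such as
vredsum.vs computes (an illustrative sketch, not LLVM code; the vl=0 case,
where the real instruction leaves vd unchanged, is omitted): vs1 is read only
at element 0, so its producer must run with VL >= 1 for that element to be
defined, while vs2 is an ordinary vector source.

  #include <cstdint>

  // Sketch of vredsum.vs: vd[0] = vs1[0] + vs2[0] + ... + vs2[vl-1].
  uint64_t vredsum_vs(const uint64_t *vs2, const uint64_t *vs1, unsigned vl) {
    uint64_t acc = vs1[0];   // scalar operand: only element 0 is read
    for (unsigned i = 0; i != vl; ++i)
      acc += vs2[i];         // vector operand: EEW=SEW, EMUL=LMUL
    return acc;              // result is written to element 0 of vd
  }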
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 42 +++++++++++++++++--
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 19 +++++++++
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 12 ++++++
3 files changed, 69 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index e8719d02cfa0aa..df28aa1c0b5e6d 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -579,6 +579,23 @@ static OperandInfo getOperandInfo(const MachineOperand &MO,
return OperandInfo(MIVLMul, MILog2SEW);
}
+ // Vector Reduction Operations
+ // Vector Single-Width Integer Reduction Instructions
+ // The Dest and VS1 only read element 0 of the vector register. Return unknown
+ // for these. VS2 has EEW=SEW and EMUL=LMUL.
+ case RISCV::VREDAND_VS:
+ case RISCV::VREDMAX_VS:
+ case RISCV::VREDMAXU_VS:
+ case RISCV::VREDMIN_VS:
+ case RISCV::VREDMINU_VS:
+ case RISCV::VREDOR_VS:
+ case RISCV::VREDSUM_VS:
+ case RISCV::VREDXOR_VS: {
+ if (MO.getOperandNo() == 2)
+ return OperandInfo(MIVLMul, MILog2SEW);
+ return {};
+ }
+
default:
return {};
}
@@ -901,11 +918,28 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
- // does not impact the decision on whether to optimize VL.
- // TODO: Treat it like a scalar register instead of bailing out.
+ // does not impact the decision on whether to optimize VL. But if there is
+ // another user of MI and it has VL=0, we need to be sure not to reduce the
+ // VL of MI to zero when the VLOp of UserOp may be non-zero.
if (isVectorOpUsedAsScalarOp(UserOp)) {
- CanReduceVL = false;
- break;
+ [[maybe_unused]] Register R = UserOp.getReg();
+ [[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
+ assert(RISCV::VRRegClass.hasSubClassEq(RC) &&
+ "Expect LMUL 1 register class for vector as scalar operands!");
+ LLVM_DEBUG(dbgs() << " Used this operand as a scalar operand\n");
+ const MCInstrDesc &Desc = UserMI.getDesc();
+ unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
+ const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
+ if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
+ (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (!CommonVL) {
+ CommonVL = &VLOp;
+ continue;
+ } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ CanReduceVL = false;
+ break;
+ }
+ }
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 808f1d4e939c13..217d8f2e2573d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -962,3 +962,22 @@ body: |
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0
...
+name: vred_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 010e3ca642269b..64a6a09fb6e8dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -33,3 +33,15 @@ body: |
%y:vr = PseudoVREDSUM_VS_M1_E64 $noreg, %x, $noreg, -1, 6 /* e64 */, 0 /* tu, mu */
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 5 /* e32 */, 0 /* tu, mu */
...
+---
+name: vred_other_user_is_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_other_user_is_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
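To connect the getOperandInfo() hunk above with these MIR tests: the
MO.getOperandNo() == 2 check singles out vs2. The operand layout below is
inferred from the test MIR (not quoted from the pseudo definitions), and the
helper name is hypothetical; the real code is inline in the switch.

  // %vd:vr = PseudoVREDAND_VS_M1_E8 %passthru, %vs2, %vs1, $avl, $sew, $policy
  //   operand 0: %vd (def), 1: %passthru, 2: %vs2, 3: %vs1
  // Only vs2 is read as a full vector, so only it reports an EEW/EMUL for the
  // compatibility check; vd, passthru, and vs1 touch element 0 only.
  static bool reductionOperandNeedsEEWCheck(unsigned OperandNo) {
    return OperandNo == 2; // vs2
  }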
>From a956f89a9f4fd89cf229a7d1f296e5cea08f9536 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:09:51 -0800
Subject: [PATCH 2/8] fixup! add more incompat tests
---
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 217d8f2e2573d7..5aa6e8903fa8b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -981,3 +981,34 @@ body: |
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
+---
+name: vred_vs1_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1_vs2_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vred_vs1_vs2_incomaptible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+
>From dec39bbad9f18730c59a163ad51264988b8a5c80 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:17:35 -0800
Subject: [PATCH 3/8] fixup! do not use else after continue/break
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index df28aa1c0b5e6d..fcf03c94fc3d93 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -935,10 +935,12 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
if (!CommonVL) {
CommonVL = &VLOp;
continue;
- } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ }
+ if (!CommonVL->isIdenticalTo(VLOp)) {
CanReduceVL = false;
break;
}
+ continue;
}
}
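For readers unfamiliar with the LLVM coding-standard rule this fixup applies
(no else after a branch ending in continue/break/return), a minimal sketch
with made-up names:

  #include <vector>

  // Since the first branch ends in `continue`, writing the second test as
  // `else if` would only add nesting; the flat form below is preferred.
  static bool allEqual(const std::vector<int> &Vals) {
    const int *Common = nullptr;
    for (const int &V : Vals) {
      if (!Common) {
        Common = &V;
        continue;
      }
      if (*Common != V) // was: else if (*Common != V)
        return false;
    }
    return true;
  }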
>From d06d8c480f522c4fd8495c018b552692b983bd24 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:21:05 -0800
Subject: [PATCH 4/8] fixup! rerun test checks
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 5aa6e8903fa8b0..49d6ad15b24d35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -986,7 +986,7 @@ name: vred_vs1_vs2
body: |
bb.0:
; CHECK-LABEL: name: vred_vs1_vs2
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
>From b5c8efe7c445efc9d48f889c25feac037abba9d1 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:29:33 -0800
Subject: [PATCH 5/8] fixup! move continue
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index fcf03c94fc3d93..a02ca7700193b2 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -940,8 +940,8 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
CanReduceVL = false;
break;
}
- continue;
}
+ continue;
}
if (mayReadPastVL(UserMI)) {
>From d4f774106e609b8826ce61ab53eee7ab20e5655a Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:30:34 -0800
Subject: [PATCH 6/8] fixup! VL operand is never x0
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index a02ca7700193b2..a414f4c895e01c 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -930,8 +930,7 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
const MCInstrDesc &Desc = UserMI.getDesc();
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
- if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
- (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
if (!CommonVL) {
CommonVL = &VLOp;
continue;
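The simplification above leans on a backend convention, stated here as an
assumption with a hypothetical helper name: the VL operand of a vector pseudo
is either a virtual register holding the runtime AVL or an immediate, with -1
encoding VLMAX; it is never the physical register X0, so the dropped
comparison could never fire.

  #include "llvm/CodeGen/MachineOperand.h"
  using namespace llvm;

  // Hypothetical helper; the real check is inline in checkUsers().
  static bool userVLMayBeNonZero(const MachineOperand &VLOp) {
    if (VLOp.isReg())
      return true;             // runtime AVL: value unknown, assume non-zero
    return VLOp.getImm() != 0; // immediate AVL; -1 (VLMAX) counts as non-zero
  }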
>From da1b1c977db0768b27c626dc6317ac4a01e14834 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:35:27 -0800
Subject: [PATCH 7/8] fixup! fix typo
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 49d6ad15b24d35..ebf60e4e753c9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1002,10 +1002,10 @@ body: |
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
...
---
-name: vred_vs1_vs2_incomaptible_emul
+name: vred_vs1_vs2_incompatible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
>From 97661e32c76eff4ec64ab3336a017ba947daf4e2 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 13:45:26 -0800
Subject: [PATCH 8/8] fixup! be less conservative
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 18 ++++----
llvm/test/CodeGen/RISCV/double_reduct.ll | 30 +++++++-------
.../rvv/fixed-vectors-reduction-formation.ll | 41 +++++++------------
.../CodeGen/RISCV/rvv/fold-binary-reduce.ll | 6 +--
4 files changed, 39 insertions(+), 56 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index a414f4c895e01c..55fe3c74f32d95 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -905,6 +905,8 @@ bool RISCVVLOptimizer::isCandidate(const MachineInstr &MI) const {
return true;
}
+static MachineOperand One = MachineOperand::CreateImm(1);
+
bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
MachineInstr &MI) {
// FIXME: Avoid visiting each user for each time we visit something on the
@@ -919,8 +921,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
// does not impact the decision on whether to optimize VL. But if there is
- // another user of MI and it has VL=0, we need to be sure not to reduce the
- // VL of MI to zero when the VLOp of UserOp may be non-zero.
+ // another user of MI and it may have VL=0, we need to be sure not to reduce
+ // the VL of MI to zero when the VLOp of UserOp may be non-zero. The most
+ // we can reduce it to is one.
if (isVectorOpUsedAsScalarOp(UserOp)) {
[[maybe_unused]] Register R = UserOp.getReg();
[[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
@@ -931,16 +934,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
- if (!CommonVL) {
- CommonVL = &VLOp;
- continue;
- }
- if (!CommonVL->isIdenticalTo(VLOp)) {
- CanReduceVL = false;
- break;
- }
+ CommonVL = &One;
+ continue;
}
- continue;
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index cecdd77a079e42..25228b21ef0554 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -25,14 +25,14 @@ define float @fmul_f32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vrgather.vi v10, v9, 1
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vslidedown.vi v8, v9, 2
+; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fmul.s fa0, fa5, fa4
; CHECK-NEXT: ret
%r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
@@ -130,14 +130,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vslidedown.vi v10, v9, 2
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vrgather.vi v10, v9, 1
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vslidedown.vi v8, v9, 2
+; RV32-NEXT: vmul.vv v8, v9, v8
+; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: ret
;
@@ -146,14 +146,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vslidedown.vi v10, v9, 2
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vrgather.vi v10, v9, 1
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vslidedown.vi v8, v9, 2
+; RV64-NEXT: vmul.vv v8, v9, v8
+; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vmul.vv v8, v8, v9
+; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: ret
%r1 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %a)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 4f0f5dd78c94b6..6f52bee591d30d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -437,8 +437,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a0, a3
@@ -452,8 +452,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a2, v9
+; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: add a0, a0, a3
@@ -799,11 +799,8 @@ define float @reduce_fadd_16xi32_prefix5(ptr %p) {
define float @reduce_fadd_2xf32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -835,11 +832,8 @@ define float @reduce_fadd_2xf32_reassoc_only(ptr %p) {
define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_ninf_only:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -854,15 +848,13 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
define float @reduce_fadd_4xi32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: flw fa5, 12(a0)
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
-; CHECK-NEXT: vfredusum.vs v9, v8, v9
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfredusum.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.s fa0, fa4, fa5
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %p, align 256
@@ -881,15 +873,10 @@ define float @reduce_fadd_4xi32_non_associative(ptr %p) {
define float @reduce_fadd_4xi32_non_associative2(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v9
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa3, v9
-; CHECK-NEXT: vfmv.f.s fa2, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
+; CHECK-NEXT: flw fa3, 8(a0)
+; CHECK-NEXT: flw fa2, 12(a0)
; CHECK-NEXT: fadd.s fa5, fa5, fa4
; CHECK-NEXT: fadd.s fa4, fa3, fa2
; CHECK-NEXT: fadd.s fa0, fa5, fa4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 2fda344690bfc6..5bc1ab9820d6cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -282,10 +282,10 @@ define float @reduce_fadd4(float %x, float %y, <4 x float> %v, <4 x float> %w) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v10
-; CHECK-NEXT: vfmv.s.f v10, fa1
-; CHECK-NEXT: vfredusum.vs v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfmv.s.f v8, fa1
+; CHECK-NEXT: vfredusum.vs v8, v9, v8
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fdiv.s fa0, fa5, fa4
; CHECK-NEXT: ret
entry:
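Tying the series together: a user that reads the register as a scalar needs
only element 0, so any non-zero user VL lets the producer's VL drop to 1,
which is what unlocks the improvements in the test diffs above. A condensed
restatement (hypothetical free function; the real logic is inline in
RISCVVLOptimizer::checkUsers()):

  #include "llvm/CodeGen/MachineOperand.h"
  using namespace llvm;

  static MachineOperand One = MachineOperand::CreateImm(1);

  // Returns true if this scalar-style use pins CommonVL to the immediate 1;
  // a user whose VL is the immediate 0 reads no elements and adds no
  // constraint here.
  static bool constrainScalarUse(const MachineOperand &VLOp,
                                 const MachineOperand *&CommonVL) {
    if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
      CommonVL = &One; // element 0 must be valid; VL = 1 suffices
      return true;
    }
    return false;
  }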