[llvm] [RISCV][VLOPT] Add support for checkUsers when UserMI is a Single-Width Integer Reduction (PR #120345)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 30 08:04:43 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/120345
>From eae73449bbe3f5e63f9d32954a9798924e5c8789 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 17:50:34 -0800
Subject: [PATCH 01/12] [RISCV][VLOPT] Add support for checkUsers when UserMI
is a reduction
Reductions are unusual because some of their operands are vector registers that
only read element 0. For these operands, we do not need to check that the EEW
and EMUL ratios match. However, when the reduction instruction has a non-zero VL
operand, we must make sure we do not set CommonVL=0. Since this is an edge case,
we simply choose not to optimize anything when it occurs.
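To make the idea concrete, here is a small standalone C++ sketch of the constraint a
vector-register-used-as-scalar (reduction) user places on the producer's VL. This is an
illustrative model only, not the patch code: the Vl struct and commonVlForScalarUse
function are invented names, and the real pass works on MachineOperand/MachineInstr. It
models the end state reached by the later fixups in this series, where a possibly
non-zero user VL clamps the common VL to 1 rather than blocking the optimization.

#include <cstdint>
#include <iostream>
#include <optional>

// Minimal stand-in for a VL operand: either a known immediate or a register
// (register VLs are treated as "possibly non-zero").
struct Vl {
  bool IsImm;
  int64_t Imm; // only meaningful when IsImm is true
};

// A reduction user reads its scalar (vs1/dest) operand from element 0 only,
// so it does not constrain the producer's EEW/EMUL. But if the user's VL may
// be non-zero, the producer must still define element 0, so the common VL
// can be shrunk to at most 1, never to 0.
std::optional<Vl> commonVlForScalarUse(const Vl &UserVl,
                                       std::optional<Vl> CommonVl) {
  bool UserVlMayBeNonZero = !UserVl.IsImm || UserVl.Imm != 0;
  if (UserVlMayBeNonZero)
    return Vl{/*IsImm=*/true, /*Imm=*/1}; // keep at least element 0 alive
  return CommonVl; // a VL=0 user reads nothing and adds no constraint
}

int main() {
  Vl ReductionVl{/*IsImm=*/true, /*Imm=*/1}; // e.g. vredsum.vs with VL=1
  auto Common = commonVlForScalarUse(ReductionVl, std::nullopt);
  std::cout << "common VL: " << (Common ? Common->Imm : int64_t(-1)) << "\n";
  return 0; // prints "common VL: 1"
}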
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 42 +++++++++++++++++--
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 19 +++++++++
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 15 ++++++-
3 files changed, 70 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 85ea5a23e8f293..924a8b0086e029 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -621,6 +621,23 @@ static OperandInfo getOperandInfo(const MachineOperand &MO,
return OperandInfo(MIVLMul, MILog2SEW);
}
+ // Vector Reduction Operations
+ // Vector Single-Width Integer Reduction Instructions
+ // The Dest and VS1 only read element 0 of the vector register. Return unknown
+ // for these. VS2 has EEW=SEW and EMUL=LMUL.
+ case RISCV::VREDAND_VS:
+ case RISCV::VREDMAX_VS:
+ case RISCV::VREDMAXU_VS:
+ case RISCV::VREDMIN_VS:
+ case RISCV::VREDMINU_VS:
+ case RISCV::VREDOR_VS:
+ case RISCV::VREDSUM_VS:
+ case RISCV::VREDXOR_VS: {
+ if (MO.getOperandNo() == 2)
+ return OperandInfo(MIVLMul, MILog2SEW);
+ return {};
+ }
+
default:
return {};
}
@@ -943,11 +960,28 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
- // does not impact the decision on whether to optimize VL.
- // TODO: Treat it like a scalar register instead of bailing out.
+ // does not impact the decision on whether to optimize VL. But if there is
+ // another user of MI and it has VL=0, we need to be sure not to reduce the
+ // VL of MI to zero when the VLOp of UserOp is may be non-zero.
if (isVectorOpUsedAsScalarOp(UserOp)) {
- CanReduceVL = false;
- break;
+ [[maybe_unused]] Register R = UserOp.getReg();
+ [[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
+ assert(RISCV::VRRegClass.hasSubClassEq(RC) &&
+ "Expect LMUL 1 register class for vector as scalar operands!");
+ LLVM_DEBUG(dbgs() << " Used this operand as a scalar operand\n");
+ const MCInstrDesc &Desc = UserMI.getDesc();
+ unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
+ const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
+ if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
+ (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (!CommonVL) {
+ CommonVL = &VLOp;
+ continue;
+ } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ CanReduceVL = false;
+ break;
+ }
+ }
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index b304769b27731f..75abf775b206a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1064,3 +1064,22 @@ body: |
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0
...
+name: vred_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 3f966b036589fd..d5d0db25129460 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -38,7 +38,7 @@ name: use_largest_common_vl_imm_imm
body: |
bb.0:
; CHECK-LABEL: name: use_largest_common_vl_imm_imm
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
@@ -110,4 +110,15 @@ body: |
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 3 /* e8 */, 0
...
-
+---
+name: vred_other_user_is_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_other_user_is_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
>From 56c9f43cb4e6e61135a97d443a8c431e3a9e450d Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:09:51 -0800
Subject: [PATCH 02/12] fixup! add more incompat tests
---
.../test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 31 +++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index 75abf775b206a4..fa64e2744e26c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1083,3 +1083,34 @@ body: |
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
...
+---
+name: vred_vs1_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1_vs2_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vred_vs1_vs2_incomaptible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+
>From 358428117318fd6add3525f664866f8a446fa37f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:17:35 -0800
Subject: [PATCH 03/12] fixup! do not else after continue/break
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 924a8b0086e029..6f620e1fbdecf3 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -977,10 +977,12 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
if (!CommonVL) {
CommonVL = &VLOp;
continue;
- } else if (!CommonVL->isIdenticalTo(VLOp)) {
+ }
+ if (!CommonVL->isIdenticalTo(VLOp)) {
CanReduceVL = false;
break;
}
+ continue;
}
}
>From 3c2a5546b630bbbeddba664fe35730c4151ed382 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:21:05 -0800
Subject: [PATCH 04/12] fixup! rerun test checks
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index fa64e2744e26c3..b605a76bcf5b4e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1088,7 +1088,7 @@ name: vred_vs1_vs2
body: |
bb.0:
; CHECK-LABEL: name: vred_vs1_vs2
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
>From 6aa3d728ff179f7f68c4d6fea436180f10183658 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:29:33 -0800
Subject: [PATCH 05/12] fixup! move continue
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 6f620e1fbdecf3..e242830a7af206 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -982,8 +982,8 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
CanReduceVL = false;
break;
}
- continue;
}
+ continue;
}
if (mayReadPastVL(UserMI)) {
>From c67245c82155dc27924ad2f2dc82565d1a6c1f34 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:30:34 -0800
Subject: [PATCH 06/12] fixup! VL operand is never x0
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index e242830a7af206..f2110b0e424365 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -972,8 +972,7 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
const MCInstrDesc &Desc = UserMI.getDesc();
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
- if ((VLOp.isReg() && VLOp.getReg() != RISCV::X0) ||
- (VLOp.isImm() && VLOp.getImm() != 0)) {
+ if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
if (!CommonVL) {
CommonVL = &VLOp;
continue;
>From 69291c003b8d620b639fee6e25c0077075ac71f2 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 17 Dec 2024 18:35:27 -0800
Subject: [PATCH 07/12] fixup! fix typo
---
llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index b605a76bcf5b4e..9e18362a41a51c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -1104,10 +1104,10 @@ body: |
%y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
...
---
-name: vred_vs1_vs2_incomaptible_emul
+name: vred_vs1_vs2_incompatible_emul
body: |
bb.0:
- ; CHECK-LABEL: name: vred_vs1_vs2_incomaptible_emul
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_emul
; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
>From d1113d220908932f05b1dd607d801e4d1d2beb6c Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 13:45:26 -0800
Subject: [PATCH 08/12] fixup! be less conservative
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 18 ++++----
llvm/test/CodeGen/RISCV/double_reduct.ll | 30 +++++++-------
.../rvv/fixed-vectors-reduction-formation.ll | 41 +++++++------------
.../CodeGen/RISCV/rvv/fold-binary-reduce.ll | 6 +--
4 files changed, 39 insertions(+), 56 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index f2110b0e424365..596ea1b4167fcf 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -947,6 +947,8 @@ bool RISCVVLOptimizer::isCandidate(const MachineInstr &MI) const {
return true;
}
+static MachineOperand One = MachineOperand::CreateImm(1);
+
bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
MachineInstr &MI) {
// FIXME: Avoid visiting each user for each time we visit something on the
@@ -961,8 +963,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
// Instructions like reductions may use a vector register as a scalar
// register. In this case, we should treat it like a scalar register which
// does not impact the decision on whether to optimize VL. But if there is
- // another user of MI and it has VL=0, we need to be sure not to reduce the
- // VL of MI to zero when the VLOp of UserOp is may be non-zero.
+ // another user of MI and it may have VL=0, we need to be sure not to reduce
+ // the VL of MI to zero when the VLOp of UserOp is may be non-zero. The most
+ // we can reduce it to is one.
if (isVectorOpUsedAsScalarOp(UserOp)) {
[[maybe_unused]] Register R = UserOp.getReg();
[[maybe_unused]] const TargetRegisterClass *RC = MRI->getRegClass(R);
@@ -973,16 +976,9 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
- if (!CommonVL) {
- CommonVL = &VLOp;
- continue;
- }
- if (!CommonVL->isIdenticalTo(VLOp)) {
- CanReduceVL = false;
- break;
- }
+ CommonVL = &One;
+ continue;
}
- continue;
}
if (mayReadPastVL(UserMI)) {
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index cecdd77a079e42..25228b21ef0554 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -25,14 +25,14 @@ define float @fmul_f32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vslidedown.vi v10, v9, 2
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vfmul.vv v8, v8, v10
-; CHECK-NEXT: vrgather.vi v10, v9, 1
-; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vslidedown.vi v8, v9, 2
+; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vfmul.vv v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fmul.s fa0, fa5, fa4
; CHECK-NEXT: ret
%r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
@@ -130,14 +130,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vslidedown.vi v10, v9, 2
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v10
-; RV32-NEXT: vrgather.vi v10, v9, 1
-; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vslidedown.vi v8, v9, 2
+; RV32-NEXT: vmul.vv v8, v9, v8
+; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: ret
;
@@ -146,14 +146,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vslidedown.vi v10, v9, 2
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v10
-; RV64-NEXT: vrgather.vi v10, v9, 1
-; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: vslidedown.vi v8, v9, 2
+; RV64-NEXT: vmul.vv v8, v9, v8
+; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vmul.vv v8, v8, v9
+; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: ret
%r1 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %a)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 4f0f5dd78c94b6..6f52bee591d30d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -437,8 +437,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a0, a3
@@ -452,8 +452,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a2, v9
+; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: add a0, a0, a3
@@ -799,11 +799,8 @@ define float @reduce_fadd_16xi32_prefix5(ptr %p) {
define float @reduce_fadd_2xf32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -835,11 +832,8 @@ define float @reduce_fadd_2xf32_reassoc_only(ptr %p) {
define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_ninf_only:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -854,15 +848,13 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
define float @reduce_fadd_4xi32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: flw fa5, 12(a0)
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
-; CHECK-NEXT: vfredusum.vs v9, v8, v9
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfredusum.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.s fa0, fa4, fa5
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %p, align 256
@@ -881,15 +873,10 @@ define float @reduce_fadd_4xi32_non_associative(ptr %p) {
define float @reduce_fadd_4xi32_non_associative2(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vfmv.f.s fa4, v9
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
-; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s fa3, v9
-; CHECK-NEXT: vfmv.f.s fa2, v8
+; CHECK-NEXT: flw fa5, 0(a0)
+; CHECK-NEXT: flw fa4, 4(a0)
+; CHECK-NEXT: flw fa3, 8(a0)
+; CHECK-NEXT: flw fa2, 12(a0)
; CHECK-NEXT: fadd.s fa5, fa5, fa4
; CHECK-NEXT: fadd.s fa4, fa3, fa2
; CHECK-NEXT: fadd.s fa0, fa5, fa4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 2fda344690bfc6..5bc1ab9820d6cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -282,10 +282,10 @@ define float @reduce_fadd4(float %x, float %y, <4 x float> %v, <4 x float> %w) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v10
-; CHECK-NEXT: vfmv.s.f v10, fa1
-; CHECK-NEXT: vfredusum.vs v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vfmv.s.f v8, fa1
+; CHECK-NEXT: vfredusum.vs v8, v9, v8
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fdiv.s fa0, fa5, fa4
; CHECK-NEXT: ret
entry:
>From 60cd462c4044c204507e755bce16d07bbe6880bb Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 13:57:50 -0800
Subject: [PATCH 09/12] fixup! fix tests after rebase
---
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index d5d0db25129460..121dd32afde11a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -38,7 +38,7 @@ name: use_largest_common_vl_imm_imm
body: |
bb.0:
; CHECK-LABEL: name: use_largest_common_vl_imm_imm
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
@@ -115,7 +115,7 @@ name: vred_other_user_is_vl0
body: |
bb.0:
; CHECK-LABEL: name: vred_other_user_is_vl0
- ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
%x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
>From 15d5ba53b60656d1e183c038f280b7350f09536d Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 13:59:59 -0800
Subject: [PATCH 10/12] fixup! add test case where reduction has VL=0
---
llvm/test/CodeGen/RISCV/rvv/vl-opt.mir | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 121dd32afde11a..a13043765410ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -122,3 +122,15 @@ body: |
%y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
...
+---
+name: vred_both_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_both_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
>From 0329514073c119b7c0bc25e1295f0c01eee855e5 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 15:21:43 -0800
Subject: [PATCH 11/12] fixup! add instructions to isVectorOpUsedAsScalarOp
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 3 ++
llvm/test/CodeGen/RISCV/double_reduct.ll | 48 +++++++++++--------
.../test/CodeGen/RISCV/intrinsic-cttz-elts.ll | 4 ++
.../rvv/fixed-vectors-reduction-formation.ll | 45 ++++++++++-------
.../CodeGen/RISCV/rvv/fold-binary-reduce.ll | 8 ++--
5 files changed, 67 insertions(+), 41 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 596ea1b4167fcf..260033423e471e 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -857,6 +857,9 @@ static bool isVectorOpUsedAsScalarOp(MachineOperand &MO) {
case RISCV::VFWREDOSUM_VS:
case RISCV::VFWREDUSUM_VS:
return MO.getOperandNo() == 3;
+ case RISCV::VMV_X_S:
+ case RISCV::VFMV_F_S:
+ return MO.getOperandNo() == 2;
default:
return false;
}
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index 25228b21ef0554..b74922761ad7c6 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -25,14 +25,14 @@ define float @fmul_f32(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
; CHECK-NEXT: vfmul.vv v8, v8, v10
+; CHECK-NEXT: vslidedown.vi v10, v9, 2
+; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vrgather.vi v10, v8, 1
; CHECK-NEXT: vfmul.vv v8, v8, v10
+; CHECK-NEXT: vrgather.vi v10, v9, 1
+; CHECK-NEXT: vfmul.vv v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vslidedown.vi v8, v9, 2
-; CHECK-NEXT: vfmul.vv v8, v9, v8
-; CHECK-NEXT: vrgather.vi v9, v8, 1
-; CHECK-NEXT: vfmul.vv v8, v8, v9
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: vfmv.f.s fa4, v9
; CHECK-NEXT: fmul.s fa0, fa5, fa4
; CHECK-NEXT: ret
%r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
@@ -130,14 +130,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: vslidedown.vi v10, v9, 2
+; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: vrgather.vi v10, v9, 1
+; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: vslidedown.vi v8, v9, 2
-; RV32-NEXT: vmul.vv v8, v9, v8
-; RV32-NEXT: vrgather.vi v9, v8, 1
-; RV32-NEXT: vmul.vv v8, v8, v9
-; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: ret
;
@@ -146,14 +146,14 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v10
+; RV64-NEXT: vslidedown.vi v10, v9, 2
+; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
; RV64-NEXT: vmul.vv v8, v8, v10
+; RV64-NEXT: vrgather.vi v10, v9, 1
+; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: vslidedown.vi v8, v9, 2
-; RV64-NEXT: vmul.vv v8, v9, v8
-; RV64-NEXT: vrgather.vi v9, v8, 1
-; RV64-NEXT: vmul.vv v8, v8, v9
-; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: mulw a0, a0, a1
; RV64-NEXT: ret
%r1 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %a)
@@ -165,8 +165,9 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @and_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: and_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredand.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -179,8 +180,9 @@ define i32 @and_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @or_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: or_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredor.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -208,8 +210,9 @@ define i32 @xor_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @umin_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredminu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -222,8 +225,9 @@ define i32 @umin_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @umax_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredmaxu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -236,8 +240,9 @@ define i32 @umax_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @smin_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredmin.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -250,8 +255,9 @@ define i32 @smin_i32(<4 x i32> %a, <4 x i32> %b) {
define i32 @smax_i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vredmax.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
index 94b717b42e92b6..f545d92fe9a0f1 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts.ll
@@ -12,9 +12,11 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vmerge.vim v8, v8, -1, v0
+; RV32-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV32-NEXT: vid.v v9
; RV32-NEXT: vrsub.vi v9, v9, 4
; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vredmaxu.vs v8, v8, v8
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 4
@@ -29,9 +31,11 @@ define i16 @ctz_v4i32(<4 x i32> %a) {
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vmerge.vim v8, v8, -1, v0
+; RV64-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64-NEXT: vid.v v9
; RV64-NEXT: vrsub.vi v9, v9, 4
; RV64-NEXT: vand.vv v8, v8, v9
+; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vredmaxu.vs v8, v8, v8
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: li a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 6f52bee591d30d..bf8baafc4a25db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -437,8 +437,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vmv.x.s a1, v9
; RV32-NEXT: vslidedown.vi v9, v8, 2
-; RV32-NEXT: vmv.x.s a2, v9
; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a2, v9
; RV32-NEXT: vmv.x.s a3, v8
; RV32-NEXT: add a1, a1, a2
; RV32-NEXT: add a0, a0, a3
@@ -452,8 +452,8 @@ define i32 @reduce_sum_4xi32_reduce_order(<4 x i32> %v) {
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vslidedown.vi v9, v8, 2
-; RV64-NEXT: vmv.x.s a2, v9
; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a2, v9
; RV64-NEXT: vmv.x.s a3, v8
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: add a0, a0, a3
@@ -530,7 +530,7 @@ define i32 @reduce_and_16xi32_prefix5(ptr %p) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -725,7 +725,7 @@ define i32 @reduce_umin_16xi32_prefix5(ptr %p) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.v.i v10, -1
; RV32-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; RV32-NEXT: vredminu.vs v8, v8, v10
@@ -799,8 +799,11 @@ define float @reduce_fadd_16xi32_prefix5(ptr %p) {
define float @reduce_fadd_2xf32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: flw fa5, 0(a0)
-; CHECK-NEXT: flw fa4, 4(a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -832,8 +835,11 @@ define float @reduce_fadd_2xf32_reassoc_only(ptr %p) {
define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
; CHECK-LABEL: reduce_fadd_2xf32_ninf_only:
; CHECK: # %bb.0:
-; CHECK-NEXT: flw fa5, 0(a0)
-; CHECK-NEXT: flw fa4, 4(a0)
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
+; CHECK-NEXT: vfmv.f.s fa4, v8
; CHECK-NEXT: fadd.s fa0, fa5, fa4
; CHECK-NEXT: ret
%v = load <2 x float>, ptr %p, align 256
@@ -848,13 +854,15 @@ define float @reduce_fadd_2xf32_ninf_only(ptr %p) {
define float @reduce_fadd_4xi32_non_associative(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: flw fa5, 12(a0)
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vfredusum.vs v8, v8, v9
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
+; CHECK-NEXT: vfredusum.vs v9, v8, v9
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
+; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: vfmv.f.s fa4, v9
; CHECK-NEXT: fadd.s fa0, fa4, fa5
; CHECK-NEXT: ret
%v = load <4 x float>, ptr %p, align 256
@@ -873,10 +881,15 @@ define float @reduce_fadd_4xi32_non_associative(ptr %p) {
define float @reduce_fadd_4xi32_non_associative2(ptr %p) {
; CHECK-LABEL: reduce_fadd_4xi32_non_associative2:
; CHECK: # %bb.0:
-; CHECK-NEXT: flw fa5, 0(a0)
-; CHECK-NEXT: flw fa4, 4(a0)
-; CHECK-NEXT: flw fa3, 8(a0)
-; CHECK-NEXT: flw fa2, 12(a0)
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
+; CHECK-NEXT: vfmv.f.s fa4, v9
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
+; CHECK-NEXT: vfmv.f.s fa3, v9
+; CHECK-NEXT: vfmv.f.s fa2, v8
; CHECK-NEXT: fadd.s fa5, fa5, fa4
; CHECK-NEXT: fadd.s fa4, fa3, fa2
; CHECK-NEXT: fadd.s fa0, fa5, fa4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 5bc1ab9820d6cc..6787c8c24c87ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -18,7 +18,7 @@ entry:
define i64 @reduce_add2(<4 x i64> %v) {
; CHECK-LABEL: reduce_add2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.v.i v10, 8
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v8, v10
@@ -282,10 +282,10 @@ define float @reduce_fadd4(float %x, float %y, <4 x float> %v, <4 x float> %w) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v10
+; CHECK-NEXT: vfmv.s.f v10, fa1
+; CHECK-NEXT: vfredusum.vs v9, v9, v10
; CHECK-NEXT: vfmv.f.s fa5, v8
-; CHECK-NEXT: vfmv.s.f v8, fa1
-; CHECK-NEXT: vfredusum.vs v8, v9, v8
-; CHECK-NEXT: vfmv.f.s fa4, v8
+; CHECK-NEXT: vfmv.f.s fa4, v9
; CHECK-NEXT: fdiv.s fa0, fa5, fa4
; CHECK-NEXT: ret
entry:
>From 90b42ce4886ca68ca2208073ef0f0f4bceecef59 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 23 Dec 2024 16:39:12 -0800
Subject: [PATCH 12/12] fixup! fix operand number and add special case when no
vlop
---
llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp | 10 +++++++++-
llvm/test/CodeGen/RISCV/double_reduct.ll | 6 ++++++
.../RISCV/rvv/fixed-vectors-reduction-int-vp.ll | 8 ++++++++
.../CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll | 12 +++++++++++-
.../test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll | 4 ++--
5 files changed, 36 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 260033423e471e..6638e890e3fd10 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -859,7 +859,7 @@ static bool isVectorOpUsedAsScalarOp(MachineOperand &MO) {
return MO.getOperandNo() == 3;
case RISCV::VMV_X_S:
case RISCV::VFMV_F_S:
- return MO.getOperandNo() == 2;
+ return MO.getOperandNo() == 1;
default:
return false;
}
@@ -976,6 +976,14 @@ bool RISCVVLOptimizer::checkUsers(const MachineOperand *&CommonVL,
"Expect LMUL 1 register class for vector as scalar operands!");
LLVM_DEBUG(dbgs() << " Used this operand as a scalar operand\n");
const MCInstrDesc &Desc = UserMI.getDesc();
+  // VMV_X_S and VFMV_F_S do not have a VL op, which would cause an assert
+  // failure if we called getVLOpNum. Therefore, we set the
+  // CommonVL in that case to 1, even if it could have been set to 0.
+ if (!RISCVII::hasVLOp(Desc.TSFlags) || !RISCVII::hasSEWOp(Desc.TSFlags)) {
+ CommonVL = &One;
+ continue;
+ }
+
unsigned VLOpNum = RISCVII::getVLOpNum(Desc);
const MachineOperand &VLOp = UserMI.getOperand(VLOpNum);
if (VLOp.isReg() || (VLOp.isImm() && VLOp.getImm() != 0)) {
diff --git a/llvm/test/CodeGen/RISCV/double_reduct.ll b/llvm/test/CodeGen/RISCV/double_reduct.ll
index b74922761ad7c6..691d17272e84f5 100644
--- a/llvm/test/CodeGen/RISCV/double_reduct.ll
+++ b/llvm/test/CodeGen/RISCV/double_reduct.ll
@@ -133,8 +133,11 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV32-NEXT: vslidedown.vi v10, v9, 2
; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vrgather.vi v10, v8, 1
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vrgather.vi v10, v9, 1
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmul.vv v9, v9, v10
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vmv.x.s a1, v9
@@ -149,8 +152,11 @@ define i32 @mul_i32(<4 x i32> %a, <4 x i32> %b) {
; RV64-NEXT: vslidedown.vi v10, v9, 2
; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vrgather.vi v10, v8, 1
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vmul.vv v8, v8, v10
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vrgather.vi v10, v9, 1
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vmul.vv v9, v9, v10
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vmv.x.s a1, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index f920e39e7d295c..6a82ff0c9479ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -1456,6 +1456,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
; RV32-NEXT: vmv.v.i v9, 1
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
@@ -1483,6 +1484,7 @@ define signext i8 @vpreduce_mul_v2i8(i8 signext %s, <2 x i8> %v, <2 x i1> %m, i3
; RV64-NEXT: vmv.v.i v9, 1
; RV64-NEXT: vmerge.vvm v8, v9, v8, v0
; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
@@ -1518,6 +1520,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
@@ -1547,6 +1550,7 @@ define signext i8 @vpreduce_mul_v4i8(i8 signext %s, <4 x i8> %v, <4 x i1> %m, i3
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
@@ -1584,6 +1588,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
@@ -1615,6 +1620,7 @@ define signext i8 @vpreduce_mul_v8i8(i8 signext %s, <8 x i8> %v, <8 x i1> %m, i3
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
@@ -1654,6 +1660,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
; RV32-NEXT: vslidedown.vi v9, v8, 2
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vrgather.vi v9, v8, 1
+; RV32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: mv a1, a2
@@ -1687,6 +1694,7 @@ define signext i8 @vpreduce_mul_v16i8(i8 signext %s, <16 x i8> %v, <16 x i1> %m,
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vrgather.vi v9, v8, 1
+; RV64-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64-NEXT: vmul.vv v8, v8, v9
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: mv a1, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 2ea618bf8a2260..98a586a5c41a51 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -5935,6 +5935,7 @@ define i8 @vreduce_mul_v2i8(ptr %x) {
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: lbu a0, 1(a0)
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -5977,6 +5978,7 @@ define i8 @vreduce_mul_v4i8(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -5997,6 +5999,7 @@ define i8 @vreduce_mul_v8i8(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6019,6 +6022,7 @@ define i8 @vreduce_mul_v16i8(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6167,6 +6171,7 @@ define i16 @vreduce_mul_v2i16(ptr %x) {
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lh a0, 2(a0)
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6185,6 +6190,7 @@ define i16 @vreduce_mul_v4i16(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6205,6 +6211,7 @@ define i16 @vreduce_mul_v8i16(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6341,6 +6348,7 @@ define i32 @vreduce_mul_v2i32(ptr %x) {
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: lw a0, 4(a0)
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6359,6 +6367,7 @@ define i32 @vreduce_mul_v4i32(ptr %x) {
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vrgather.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -6496,9 +6505,9 @@ define i64 @vreduce_mul_v2i64(ptr %x) {
; RV32-NEXT: addi a0, a0, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
@@ -6508,6 +6517,7 @@ define i64 @vreduce_mul_v2i64(ptr %x) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: ld a0, 8(a0)
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 5e657a93ec0d63..fb15085cd3b5a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -21,7 +21,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV32-NEXT: vsetivli zero, 1, e8, m1, tu, ma
; RV32-NEXT: vmv1r.v v9, v8
; RV32-NEXT: vmv.s.x v9, a0
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV32-NEXT: vmseq.vi v9, v9, 0
; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: andi a3, a0, 255
@@ -48,7 +48,7 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
; RV64-NEXT: vsetivli zero, 1, e8, m1, tu, ma
; RV64-NEXT: vmv1r.v v9, v8
; RV64-NEXT: vmv.s.x v9, a0
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; RV64-NEXT: vmseq.vi v9, v9, 0
; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: andi a3, a0, 255