[llvm] 4c03c9f - [RISCV] Add missing VL arguments to the creation of RISCVISD::VMV_V_X_VL nodes.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 3 12:13:31 PDT 2022
Author: Craig Topper
Date: 2022-10-03T12:13:21-07:00
New Revision: 4c03c9f375f326a87065443d649c6568a4b7dd67
URL: https://github.com/llvm/llvm-project/commit/4c03c9f375f326a87065443d649c6568a4b7dd67
DIFF: https://github.com/llvm/llvm-project/commit/4c03c9f375f326a87065443d649c6568a4b7dd67.diff
LOG: [RISCV] Add missing VL arguments to the creation of RISCVISD::VMV_V_X_VL nodes.
VMV_V_X_VL nodes should always have a passthru, a splat, and a VL.
We were sometimes missing the VL.
This went unnoticed because these cases were all selected into the
following node to form a .vx or .vi instruction. The ComplexPattern
that does this doesn't check the VL operand. I've added an assert
to the ComplexPattern to catch if the operand is missing.
@qcolombet spotted some of these in D134703.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/stepvector.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index be1656518119..a3ad883ed42c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2366,6 +2366,7 @@ bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
return false;
+ assert(N.getNumOperands() == 3 && "Unexpected number of operands");
SplatVal = N.getOperand(1);
return true;
}
@@ -2379,6 +2380,7 @@ static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
!isa<ConstantSDNode>(N.getOperand(1)))
return false;
+ assert(N.getNumOperands() == 3 && "Unexpected number of operands");
int64_t SplatImm =
cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index fd94a699126d..af8fdb77c9dd 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5967,6 +5967,7 @@ SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
+ assert(VT.isScalableVector() && "Expected scalable vector");
MVT XLenVT = Subtarget.getXLenVT();
auto [Mask, VL] = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
@@ -5975,7 +5976,7 @@ SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
if (isPowerOf2_64(StepValImm)) {
SDValue StepVal =
DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
- DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
+ DAG.getConstant(Log2_64(StepValImm), DL, XLenVT), VL);
StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
} else {
SDValue StepVal = lowerScalarSplat(
@@ -6361,7 +6362,7 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
SDValue SplatZero = DAG.getNode(
RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
- DAG.getConstant(0, DL, Subtarget.getXLenVT()));
+ DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
DAG.getUNDEF(ContainerVT), Mask, VL);
SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
@@ -6711,7 +6712,7 @@ SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
MVT XLenVT = Subtarget.getXLenVT();
SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
- DAG.getUNDEF(InterimIVT), SplatZero);
+ DAG.getUNDEF(InterimIVT), SplatZero, VL);
Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT,
{Result, SplatZero, DAG.getCondCode(ISD::SETNE),
DAG.getUNDEF(DstVT), Mask, VL});
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index 96070da031b7..3287ce39b07b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -67,7 +67,7 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8() {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vsll.vi v8, v8, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
@@ -232,7 +232,7 @@ define <vscale x 16 x i16> @add_stepvector_nxv16i16() {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vsll.vi v8, v8, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
@@ -361,7 +361,7 @@ define <vscale x 16 x i32> @add_stepvector_nxv16i32() {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vsll.vi v8, v8, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
@@ -466,7 +466,7 @@ define <vscale x 8 x i64> @add_stepvector_nxv8i64() {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vsll.vi v8, v8, 1
+; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
%0 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
@@ -584,7 +584,7 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
-; RV32-NEXT: vsll.vi v8, v8, 1
+; RV32-NEXT: vadd.vv v8, v8, v8
; RV32-NEXT: vadd.vv v16, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -595,7 +595,7 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vid.v v8
-; RV64-NEXT: vsll.vi v8, v8, 1
+; RV64-NEXT: vadd.vv v8, v8, v8
; RV64-NEXT: vadd.vx v16, v8, a0
; RV64-NEXT: ret
entry:
More information about the llvm-commits
mailing list