[llvm] f8a0572 - [RISCV][NFC] Add policy operand for RISCVISD::VSLIDEUP_VL and RISCVISD::VSLIDEDOWN_VL.
Yeting Kuo via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 20 18:50:10 PST 2022
Author: Yeting Kuo
Date: 2022-12-21T10:50:04+08:00
New Revision: f8a05727b03093081b28589ac9c52cfe98d1ca78
URL: https://github.com/llvm/llvm-project/commit/f8a05727b03093081b28589ac9c52cfe98d1ca78
DIFF: https://github.com/llvm/llvm-project/commit/f8a05727b03093081b28589ac9c52cfe98d1ca78.diff
LOG: [RISCV][NFC] Add policy operand for RISCVISD::VSLIDEUP_VL and RISCVISD::VSLIDEDOWN_VL.
There is room for optimization by using tail-agnostic vslideup/vslidedown to lower
some vector operations. D125546 is a revision that demonstrates this kind of optimization.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D140393
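For context, the helpers introduced below (getVSlidedown/getVSlideup) take an
optional policy operand that defaults to tail-undisturbed/mask-undisturbed
(TUMU) and is forced to tail/mask-agnostic when the merge operand is undef.
The following is a minimal sketch of how a later optimization could pass an
explicit policy at a call site like the one in lowerINSERT_SUBVECTOR; the
surrounding values (DAG, Subtarget, DL, ContainerVT, Vec, SubVec, SlideupAmt,
Mask, VL) are assumed to be set up as in that function, and the call itself is
illustrative, not part of this patch:

  // Hypothetical call site: request a tail-agnostic slideup explicitly
  // instead of relying on the TUMU default or an undef merge operand.
  SDValue Slideup =
      getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec, SlideupAmt,
                  Mask, VL, /*Policy=*/RISCVII::TAIL_AGNOSTIC);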
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dd316b8472a4..318278c9fd87 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3075,6 +3075,28 @@ static SDValue lowerVECTOR_SHUFFLEAsVNSRL(const SDLoc &DL, MVT VT,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
+static SDValue getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
+ SDLoc DL, EVT VT, SDValue Merge, SDValue Op,
+ SDValue Offset, SDValue Mask, SDValue VL,
+ unsigned Policy = /* TUMU */ 0) {
+ if (Merge.isUndef())
+ Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+ SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
+}
+
+static SDValue getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
+ SDLoc DL, EVT VT, SDValue Merge, SDValue Op,
+ SDValue Offset, SDValue Mask, SDValue VL,
+ unsigned Policy = /* TUMU */ 0) {
+ if (Merge.isUndef())
+ Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+ SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+ SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+ return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
+}
+
// Lower the following shuffle to vslidedown.
// a)
// t49: v8i8 = extract_subvector t13, Constant:i64<0>
@@ -3142,10 +3164,10 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
MVT SrcVT = Src.getSimpleValueType();
MVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
auto [TrueMask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
- SDValue Slidedown = DAG.getNode(
- RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
- convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
- DAG.getConstant(NewMask[0], DL, XLenVT), TrueMask, VL);
+ SDValue Slidedown =
+ getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+ convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
+ DAG.getConstant(NewMask[0], DL, XLenVT), TrueMask, VL);
return DAG.getNode(
ISD::EXTRACT_SUBVECTOR, DL, VT,
convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget),
@@ -3275,12 +3297,12 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
SDValue DownVL = VL;
if (LoV)
DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
- Res =
- DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
- DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
+ Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
+ DAG.getConstant(Rotation, DL, XLenVT), TrueMask,
+ DownVL);
}
if (LoV)
- Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
+ Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
return convertFromScalableVector(VT, Res, DAG, Subtarget);
@@ -5225,8 +5247,8 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
// Now that the value is in a vector, slide it into position.
SDValue InsertVL =
DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
- SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
- ValInVec, Idx, Mask, InsertVL);
+ SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
+ Idx, Mask, InsertVL);
if (!VecVT.isFixedLengthVector())
return Slideup;
return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
@@ -5303,8 +5325,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
if (!isNullConstant(Idx)) {
// Use a VL of 1 to avoid processing more elements than we need.
auto [Mask, VL] = getDefaultVLOps(1, ContainerVT, DL, DAG, Subtarget);
- Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
- DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
+ Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
}
if (!EltVT.isInteger()) {
@@ -6133,8 +6155,8 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
SDValue VL =
getVLOp(OrigIdx + SubVecVT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
- SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
- SubVec, SlideupAmt, Mask, VL);
+ SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, SubVec,
+ SlideupAmt, Mask, VL);
if (VecVT.isFixedLengthVector())
Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
return DAG.getBitcast(Op.getValueType(), Slideup);
@@ -6196,8 +6218,8 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
DAG.getUNDEF(InterSubVT), SubVec,
DAG.getConstant(0, DL, XLenVT));
- SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
- AlignedExtract, SubVec, SlideupAmt, Mask, VL);
+ SDValue Slideup = getVSlideup(DAG, Subtarget, DL, InterSubVT, AlignedExtract,
+ SubVec, SlideupAmt, Mask, VL);
// If required, insert this subvector back into the correct vector register.
// This should resolve to an INSERT_SUBREG instruction.
@@ -6280,8 +6302,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
SDValue VL = getVLOp(SubVecVT.getVectorNumElements(), DL, DAG, Subtarget);
SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
SDValue Slidedown =
- DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
- DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
+ getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
// Now we can use a cast-like subvector extract to get the result.
Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
DAG.getConstant(0, DL, XLenVT));
@@ -6320,8 +6342,8 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
auto [Mask, VL] = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
SDValue Slidedown =
- DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
- DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
+ getVSlidedown(DAG, Subtarget, DL, InterSubVT, DAG.getUNDEF(InterSubVT),
+ Vec, SlidedownAmt, Mask, VL);
// Now the vector is in the right position, extract our final subvector. This
// should resolve to a COPY.
@@ -6470,9 +6492,9 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
SDValue SlideDown =
- DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
- DownOffset, TrueMask, UpOffset);
- return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
+ getVSlidedown(DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1,
+ DownOffset, TrueMask, UpOffset);
+ return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
TrueMask, DAG.getRegister(RISCV::X0, XLenVT));
}
@@ -7934,8 +7956,8 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
// Unless the index is known to be 0, we must slide the vector down to get
// the desired element into index 0.
if (!isNullConstant(Idx)) {
- Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
- DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
+ Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
+ DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
}
// Extract the lower XLEN bits of the correct vector element.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 56b06526f08a..c6804b6011eb 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1964,9 +1964,10 @@ def riscv_vid_vl : SDNode<"RISCVISD::VID_VL", SDTypeProfile<1, 2,
[SDTCisVec<0>, SDTCVecEltisVT<1, i1>,
SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, XLenVT>]>, []>;
-def SDTRVVSlide : SDTypeProfile<1, 5, [
+def SDTRVVSlide : SDTypeProfile<1, 6, [
SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
- SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>
+ SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
+ SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
@@ -2016,48 +2017,34 @@ foreach vti = !listconcat(AllIntegerVectors, AllFloatVectors) in {
def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
uimm5:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
+ VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSLIDEUP_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
+ GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
def : Pat<(vti.Vector (riscv_slideup_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
+ VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSLIDEUP_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
+ GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
uimm5:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
+ VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
- def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector undef),
- (vti.Vector vti.RegClass:$rs1),
- uimm5:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVSLIDEDOWN_VI_"#vti.LMul.MX)
- (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, uimm5:$rs2,
- GPR:$vl, vti.Log2SEW, TA_MA)>;
+ GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector vti.RegClass:$rs3),
(vti.Vector vti.RegClass:$rs1),
GPR:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
+ VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED_MASK_UNDISTURBED)>;
- def : Pat<(vti.Vector (riscv_slidedown_vl (vti.Vector undef),
- (vti.Vector vti.RegClass:$rs1),
- GPR:$rs2, (vti.Mask true_mask),
- VLOpFrag)),
- (!cast<Instruction>("PseudoVSLIDEDOWN_VX_"#vti.LMul.MX)
- (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
- GPR:$vl, vti.Log2SEW, TA_MA)>;
+ GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
}
} // Predicates = [HasVInstructions]
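For reference, the dedicated undef-merge patterns deleted above are effectively
subsumed by the new helpers: an undef merge operand forces
TAIL_AGNOSTIC | MASK_AGNOSTIC, the same TA_MA policy the removed patterns
hard-coded. Below is a standalone sketch of that selection logic, with the
policy values assumed to mirror RISCVII's encoding at this revision
(TUMU = 0, TAIL_AGNOSTIC = 1, MASK_AGNOSTIC = 2):

  #include <cstdio>

  // Policy bits, assumed to mirror RISCVII's encoding at this revision.
  enum : unsigned {
    TAIL_UNDISTURBED_MASK_UNDISTURBED = 0, // TUMU, the helpers' default
    TAIL_AGNOSTIC = 1,
    MASK_AGNOSTIC = 2,
  };

  // If the merge (passthru) operand is undef there are no prior element
  // values to preserve, so the most permissive policy is chosen regardless
  // of what the caller requested.
  unsigned selectSlidePolicy(bool MergeIsUndef,
                             unsigned Requested = TAIL_UNDISTURBED_MASK_UNDISTURBED) {
    return MergeIsUndef ? (TAIL_AGNOSTIC | MASK_AGNOSTIC) : Requested;
  }

  int main() {
    std::printf("undef merge -> policy %u (TA|MA)\n", selectSlidePolicy(true));
    std::printf("kept merge  -> policy %u (TUMU)\n", selectSlidePolicy(false));
    return 0;
  }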