[llvm] 615af94 - [RISCV] Lower VECTOR_SHUFFLE to VSLIDEDOWN_VL.
Han-Kuan Chen via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 18 08:58:48 PDT 2022
Author: Han-Kuan Chen
Date: 2022-10-18T08:58:39-07:00
New Revision: 615af94dc20cb71d537858d30cf53549a6f063fc
URL: https://github.com/llvm/llvm-project/commit/615af94dc20cb71d537858d30cf53549a6f063fc
DIFF: https://github.com/llvm/llvm-project/commit/615af94dc20cb71d537858d30cf53549a6f063fc.diff
LOG: [RISCV] Lower VECTOR_SHUFFLE to VSLIDEDOWN_VL.
Differential Revision: https://reviews.llvm.org/D136136
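The new path recognizes a fixed-vector shuffle whose operands, at the SelectionDAG level, are contiguous extract_subvector nodes of the same source and whose mask is a consecutive run of indices; it emits a single VSLIDEDOWN_VL instead of the previous slidedown/slideup pair. A minimal example, taken from the updated tests below (the comments show the codegen given by the new CHECK lines):

  define <4 x i16> @shuffle_v8i16_to_vslidedown_1(<8 x i16> %x) {
  entry:
    ; The mask <1,2,3,4> is a consecutive run starting at 1, so this now
    ; selects to
    ;   vsetivli zero, 8, e16, m1, ta, ma
    ;   vslidedown.vi v8, v8, 1
    ; instead of the previous slidedown/slideup/vmv1r sequence.
    %s = shufflevector <8 x i16> %x, <8 x i16> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
    ret <4 x i16> %s
  }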
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1ab814bfb4fcd..d134f56eb927b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2878,6 +2878,54 @@ static SDValue lowerVECTOR_SHUFFLEAsVNSRL(const SDLoc &DL, MVT VT,
return convertFromScalableVector(VT, Res, DAG, Subtarget);
}
+// Lower the following shuffle to vslidedown.
+// t49: v8i8 = extract_subvector t13, Constant:i64<0>
+// t109: v8i8 = extract_subvector t12, Constant:i64<8>
+// t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
+static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
+ SDValue V1, SDValue V2,
+ ArrayRef<int> Mask,
+ const RISCVSubtarget &Subtarget,
+ SelectionDAG &DAG) {
+ // Both inputs must be extracts.
+ if (V1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
+ V2.getOpcode() != ISD::EXTRACT_SUBVECTOR)
+ return SDValue();
+
+ // Both extracts must come from the same source.
+ SDValue Src = V1.getOperand(0);
+ if (Src != V2.getOperand(0))
+ return SDValue();
+
+ // V1 must start at index 0.
+ // V2 must immediately follow V1.
+ if (V1.getConstantOperandVal(1) != 0 ||
+ VT.getVectorNumElements() != V2.getConstantOperandVal(1))
+ return SDValue();
+
+ // Do not handle -1 here. -1 can be handled by isElementRotate.
+ if (Mask[0] == -1)
+ return SDValue();
+
+ // The mask must also be a run of consecutive indices.
+ for (unsigned i = 1; i != Mask.size(); ++i)
+ if (Mask[i - 1] + 1 != Mask[i])
+ return SDValue();
+
+ MVT XLenVT = Subtarget.getXLenVT();
+ MVT SrcVT = Src.getSimpleValueType();
+ MVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT, Subtarget);
+ auto [TrueMask, VL] = getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
+ SDValue Slidedown = DAG.getNode(
+ RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+ convertToScalableVector(ContainerVT, Src, DAG, Subtarget),
+ DAG.getConstant(Mask[0], DL, XLenVT), TrueMask, VL);
+ return DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, DL, VT,
+ convertFromScalableVector(SrcVT, Slidedown, DAG, Subtarget),
+ DAG.getConstant(0, DL, XLenVT));
+}
+
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
SDValue V1 = Op.getOperand(0);
@@ -2969,6 +3017,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
ArrayRef<int> Mask = SVN->getMask();
+ if (SDValue V =
+ lowerVECTOR_SHUFFLEAsVSlidedown(DL, VT, V1, V2, Mask, Subtarget, DAG))
+ return V;
+
// Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
// be undef which can be handled with a single SLIDEDOWN/UP.
int LoSrc, HiSrc;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 20f9663bb8ef0..dabe47e1a36e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -229,13 +229,8 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
define <4 x half> @shuffle_v8f16_to_vslidedown_1(<8 x half> %x) {
; CHECK-LABEL: shuffle_v8f16_to_vslidedown_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 3
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: ret
entry:
%s = shufflevector <8 x half> %x, <8 x half> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -245,13 +240,8 @@ entry:
define <4 x half> @shuffle_v8f16_to_vslidedown_3(<8 x half> %x) {
; CHECK-LABEL: shuffle_v8f16_to_vslidedown_3:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: ret
entry:
%s = shufflevector <8 x half> %x, <8 x half> poison, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -261,13 +251,8 @@ entry:
define <2 x float> @shuffle_v4f32_to_vslidedown(<4 x float> %x) {
; CHECK-LABEL: shuffle_v4f32_to_vslidedown:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: ret
entry:
%s = shufflevector <4 x float> %x, <4 x float> poison, <2 x i32> <i32 1, i32 2>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index ebe435235f7fa..6b41fd762fdb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -295,13 +295,8 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
define <4 x i16> @shuffle_v8i16_to_vslidedown_1(<8 x i16> %x) {
; CHECK-LABEL: shuffle_v8i16_to_vslidedown_1:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 3
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: ret
entry:
%s = shufflevector <8 x i16> %x, <8 x i16> poison, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
@@ -311,13 +306,8 @@ entry:
define <4 x i16> @shuffle_v8i16_to_vslidedown_3(<8 x i16> %x) {
; CHECK-LABEL: shuffle_v8i16_to_vslidedown_3:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 3
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: ret
entry:
%s = shufflevector <8 x i16> %x, <8 x i16> poison, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
@@ -327,13 +317,8 @@ entry:
define <2 x i32> @shuffle_v4i32_to_vslidedown(<4 x i32> %x) {
; CHECK-LABEL: shuffle_v4i32_to_vslidedown:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
-; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: ret
entry:
%s = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 1, i32 2>