[llvm] 4184baf - [RISCV] Refactor lowerSPLAT_VECTOR_PARTS to use splatPartsI64WithVL for scalable vectors.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 25 14:15:47 PDT 2023


Author: Craig Topper
Date: 2023-08-25T14:15:40-07:00
New Revision: 4184bafa9b466180ff9f9f6d029301271db5d760

URL: https://github.com/llvm/llvm-project/commit/4184bafa9b466180ff9f9f6d029301271db5d760
DIFF: https://github.com/llvm/llvm-project/commit/4184bafa9b466180ff9f9f6d029301271db5d760.diff

LOG: [RISCV] Refactor lowerSPLAT_VECTOR_PARTS to use splatPartsI64WithVL for scalable vectors.

There was quite a bit of duplication between splatPartsI64WithVL
and the scalable vector handling in lowerSPLAT_VECTOR_PARTS, but
the scalable vector path had one additional case: detecting that
Hi is (SRA Lo, 31), i.e. Lo sign extended. Move that case into
splatPartsI64WithVL, which also improves some fixed vector tests.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D158876
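
To make the moved check concrete, here is a minimal standalone C++ sketch
(plain C++, not the LLVM API; the helper name is made up for illustration)
of the property being detected: if Hi equals Lo arithmetically shifted
right by 31, the (Lo, Hi) pair is exactly the 64-bit sign extension of Lo,
so a single sign-extending vmv.v.x of Lo produces the full 64-bit splat.
In the compiler the check matches the (SRA Lo, 31) node rather than
evaluating the shift.

#include <cstdint>

// True when Hi replicates Lo's sign bit, i.e. the (Lo, Hi) word pair is
// the 64-bit sign extension of Lo. Assumes >> on a negative int32_t is an
// arithmetic shift (guaranteed since C++20, true on mainstream compilers).
static bool hiIsSignWordOfLo(int32_t Lo, int32_t Hi) {
  return Hi == (Lo >> 31);
}

int main() {
  // (7, 0) and (-7, -1) qualify; (7, -1) does not.
  return (hiIsSignWordOfLo(7, 0) && hiIsSignWordOfLo(-7, -1) &&
          !hiIsSignWordOfLo(7, -1))
             ? 0
             : 1;
}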

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 778b3824eda147..343b1cd51db13d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3571,6 +3571,13 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
     }
   }
+
+  // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
+  if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
+      isa<ConstantSDNode>(Hi.getOperand(1)) &&
+      Hi.getConstantOperandVal(1) == 31)
+    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
+
   // If the hi bits of the splat are undefined, then it's fine to just splat Lo
   // even if it might be sign extended.
   if (Hi.isUndef())
@@ -6870,43 +6877,19 @@ SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
   SDValue Lo = Op.getOperand(0);
   SDValue Hi = Op.getOperand(1);
 
-  if (VecVT.isFixedLengthVector()) {
-    MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
-    SDLoc DL(Op);
-    auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
-
-    SDValue Res =
-        splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
-    return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
-  }
+  MVT ContainerVT = VecVT;
+  if (VecVT.isFixedLengthVector())
+    ContainerVT = getContainerForFixedLengthVector(VecVT);
 
-  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
-    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
-    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
-    // If Hi constant is all the same sign bit as Lo, lower this as a custom
-    // node in order to try and match RVV vector/scalar instructions.
-    if ((LoC >> 31) == HiC)
-      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
-                         Lo, DAG.getRegister(RISCV::X0, MVT::i32));
-  }
+  auto VL = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).second;
 
-  // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
-  if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
-      isa<ConstantSDNode>(Hi.getOperand(1)) &&
-      Hi.getConstantOperandVal(1) == 31)
-    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
-                       DAG.getRegister(RISCV::X0, MVT::i32));
+  SDValue Res =
+      splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
 
-  // If the hi bits of the splat are undefined, then it's fine to just splat Lo
-  // even if it might be sign extended.
-  if (Hi.isUndef())
-    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
-                       DAG.getRegister(RISCV::X0, MVT::i32));
+  if (VecVT.isFixedLengthVector())
+    Res = convertFromScalableVector(VecVT, Res, DAG, Subtarget);
 
-  // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
-  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
-                     DAG.getUNDEF(VecVT), Lo, Hi,
-                     DAG.getRegister(RISCV::X0, MVT::i32));
+  return Res;
 }
 
 // Custom-lower extensions from mask vectors by using a vselect either with 1

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index f30fc791d107ff..32d8e0fc6a6399 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -768,17 +768,12 @@ define <4 x i32> @vwadd_vx_v4i32_i32(ptr %x, ptr %y) {
 define <2 x i64> @vwadd_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwadd_vx_v2i64_i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lb a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwadd.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwadd_vx_v2i64_i8:
@@ -801,17 +796,12 @@ define <2 x i64> @vwadd_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
 define <2 x i64> @vwadd_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwadd_vx_v2i64_i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lh a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwadd.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwadd_vx_v2i64_i16:
@@ -834,17 +824,12 @@ define <2 x i64> @vwadd_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
 define <2 x i64> @vwadd_vx_v2i64_i32(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwadd_vx_v2i64_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwadd.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwadd_vx_v2i64_i32:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 63de84a35022cd..429b078eef27eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -800,19 +800,11 @@ define <4 x i32> @vwmul_vx_v4i32_i32(ptr %x, ptr %y) {
 define <2 x i64> @vwmul_vx_v2i64_i8(ptr %x, ptr %y) {
 ; RV32-LABEL: vwmul_vx_v2i64_i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    lb a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsext.vf2 v10, v8
-; RV32-NEXT:    vmul.vv v8, v9, v10
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    lb a0, 0(a1)
+; RV32-NEXT:    vsext.vf2 v9, v8
+; RV32-NEXT:    vmul.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i8:
@@ -835,19 +827,11 @@ define <2 x i64> @vwmul_vx_v2i64_i8(ptr %x, ptr %y) {
 define <2 x i64> @vwmul_vx_v2i64_i16(ptr %x, ptr %y) {
 ; RV32-LABEL: vwmul_vx_v2i64_i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    lh a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsext.vf2 v10, v8
-; RV32-NEXT:    vmul.vv v8, v9, v10
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    lh a0, 0(a1)
+; RV32-NEXT:    vsext.vf2 v9, v8
+; RV32-NEXT:    vmul.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i16:
@@ -870,19 +854,11 @@ define <2 x i64> @vwmul_vx_v2i64_i16(ptr %x, ptr %y) {
 define <2 x i64> @vwmul_vx_v2i64_i32(ptr %x, ptr %y) {
 ; RV32-LABEL: vwmul_vx_v2i64_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vsext.vf2 v10, v8
-; RV32-NEXT:    vmul.vv v8, v9, v10
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    lw a0, 0(a1)
+; RV32-NEXT:    vsext.vf2 v9, v8
+; RV32-NEXT:    vmul.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwmul_vx_v2i64_i32:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index a8120b4a49bd5a..6d7507bb4fb0e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -769,17 +769,12 @@ define <4 x i32> @vwsub_vx_v4i32_i32(ptr %x, ptr %y) {
 define <2 x i64> @vwsub_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwsub_vx_v2i64_i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lb a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwsub.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i8:
@@ -803,17 +798,12 @@ define <2 x i64> @vwsub_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
 define <2 x i64> @vwsub_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwsub_vx_v2i64_i16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lh a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwsub.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i16:
@@ -837,17 +827,12 @@ define <2 x i64> @vwsub_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
 define <2 x i64> @vwsub_vx_v2i64_i32(ptr %x, ptr %y) nounwind {
 ; RV32-LABEL: vwsub_vx_v2i64_i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    lw a1, 0(a1)
 ; RV32-NEXT:    vle32.v v9, (a0)
-; RV32-NEXT:    srai a0, a1, 31
-; RV32-NEXT:    sw a1, 8(sp)
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vwsub.wv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vwsub_vx_v2i64_i32:

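For readers skimming the RV32 test diffs above: the splat previously went
through a stack store of the (Lo, Hi) pair followed by a stride-zero
vlse64.v, and now uses a single sign-extending vmv.v.x (or vmul.vx). A
small standalone C++ sketch (hypothetical helper names, not LLVM code) of
why the two lowerings produce the same 64-bit elements:

#include <cstdint>
#include <initializer_list>

// Old lowering, modeled on scalars: pair Lo with its sign word Hi, as the
// "srai a0, a1, 31" plus stack store plus vlse64.v sequence did.
static int64_t splatViaParts(int32_t Lo) {
  int32_t Hi = Lo >> 31; // arithmetic shift broadcasts the sign bit
  uint64_t Bits = (static_cast<uint64_t>(static_cast<uint32_t>(Hi)) << 32) |
                  static_cast<uint32_t>(Lo);
  return static_cast<int64_t>(Bits);
}

// New lowering, modeled on scalars: vmv.v.x sign extends the 32-bit scalar
// directly into each 64-bit element.
static int64_t splatViaVmvVX(int32_t Lo) { return static_cast<int64_t>(Lo); }

int main() {
  for (int32_t Lo : {0, 7, -7, INT32_MIN, INT32_MAX})
    if (splatViaParts(Lo) != splatViaVmvVX(Lo))
      return 1;
  return 0;
}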