[llvm] 88c0e08 - [RISCV] Improve i64 splat vector lowering in RV32.

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 13 22:07:33 PST 2022


Author: jacquesguan
Date: 2022-01-14T14:06:01+08:00
New Revision: 88c0e0806b36a32e92f97585b85db6e2ec9860ec

URL: https://github.com/llvm/llvm-project/commit/88c0e0806b36a32e92f97585b85db6e2ec9860ec
DIFF: https://github.com/llvm/llvm-project/commit/88c0e0806b36a32e92f97585b85db6e2ec9860ec.diff

LOG: [RISCV] Improve i64 splat vector lowering in RV32.

We can use vmv.v.i/vmv.v.x with EEW=32 to lower an i64 splat vector when the i64 constant scalar can be split into two identical i32 halves.

Differential Revision: https://reviews.llvm.org/D117079
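
As a concrete illustration of the splitting check, here is a minimal standalone C++ sketch (not part of the patch; the helper name canSplatAsI32 is invented for this example) of the condition under which an RV32 i64 splat can be emitted as an EEW=32 splat followed by a bitcast:

#include <cstdint>
#include <cstdio>

// Returns true when a 64-bit splat constant can instead be emitted as an
// EEW=32 splat of `Half` with twice the element count, followed by a bitcast,
// i.e. when both 32-bit halves of the constant are identical.
static bool canSplatAsI32(int64_t Val, int32_t &Half) {
  uint64_t Bits = static_cast<uint64_t>(Val);
  int32_t Lo = static_cast<int32_t>(Bits & 0xFFFFFFFFu);
  int32_t Hi = static_cast<int32_t>(Bits >> 32);
  if (Lo != Hi)
    return false;
  Half = Lo;
  return true;
}

int main() {
  int32_t Half;
  // 0x0000000300000003: both halves are 3, so it can become "vmv.v.i v8, 3"
  // under an e32 vtype, as in the updated tests below.
  std::printf("%d\n", canSplatAsI32(0x0000000300000003LL, Half)); // prints 1
  // 0x0000000100000002: the halves differ, so the stack store + vlse64.v
  // fallback remains necessary.
  std::printf("%d\n", canSplatAsI32(0x0000000100000002LL, Half)); // prints 0
  return 0;
}

In the patch itself, this EEW=32 path additionally requires VL to be VLMaxSentinel, since reinterpreting the vector at e32 doubles the element count (see the TODO in the diff below).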

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a9d52d19d045e..97cdc2752df52 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2221,6 +2221,17 @@ static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
     // node in order to try and match RVV vector/scalar instructions.
     if ((LoC >> 31) == HiC)
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
+
+    // If VL is equal to VLMax and the Hi constant is equal to Lo, we can use a
+    // vmv.v.x with EEW=32 to lower it.
+    auto *Const = dyn_cast<ConstantSDNode>(VL);
+    if (LoC == HiC && Const && Const->getSExtValue() == RISCV::VLMaxSentinel) {
+      MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
+      // TODO: We can also do this when vl <= min(VLMAX), but we cannot access
+      // the subtarget here yet.
+      auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, VL);
+      return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
+    }
   }
 
   // Fall back to a stack store and stride x0 vector load.

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
index d0251a2aeb0b0..dab05270b0039 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -728,14 +728,8 @@ entry:
 define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
@@ -748,14 +742,8 @@ entry:
 define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
@@ -768,14 +756,8 @@ entry:
 define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
@@ -788,14 +770,8 @@ entry:
 define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(

More information about the llvm-commits mailing list