[llvm] 6a79441 - [RISCV] Optimize i64 insertelt on RV32.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 28 10:23:55 PDT 2022


Author: Craig Topper
Date: 2022-10-28T10:23:19-07:00
New Revision: 6a794419cddb5e77178f2b048b5220051be758f8

URL: https://github.com/llvm/llvm-project/commit/6a794419cddb5e77178f2b048b5220051be758f8
DIFF: https://github.com/llvm/llvm-project/commit/6a794419cddb5e77178f2b048b5220051be758f8.diff

LOG: [RISCV] Optimize i64 insertelt on RV32.

When the insert index is 0, we can use a tail-undisturbed vslide1down to slide the
two i32 halves directly into the destination vector, avoiding the separate
tail-undisturbed vslideup merge.

This should make D136640 unnecessary.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D136738
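
As a concrete illustration, the assembly below is taken from insertelt_nxv1i64_0 in
llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll (updated in this patch). Inserting
an i64 at index 0 of a <vscale x 1 x i64> on RV32 previously built the two halves in
a scratch register and merged them back with a tail-undisturbed vslideup:

    vsetivli zero, 2, e32, m1, ta, ma
    vslide1down.vx v9, v8, a0
    vslide1down.vx v9, v9, a1
    vsetivli zero, 1, e64, m1, tu, ma
    vslideup.vi v8, v9, 0

With this patch the two halves are slid directly into the destination under a
tail-undisturbed policy, so the second vsetivli and the vslideup disappear:

    vsetivli zero, 2, e32, m1, tu, ma
    vslide1down.vx v8, v8, a0
    vslide1down.vx v8, v8, a1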

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cdc1f8f0a7c24..5ecf46806ceb6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5008,6 +5008,25 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
     // Limit the active VL to two.
     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
+    // If the Idx is 0 we can insert directly into the vector.
+    if (isNullConstant(Idx)) {
+      // First slide in the lo value, then the hi in above it. We use slide1down
+      // to avoid the register group overlap constraint of vslide1up.
+      ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                             Vec, Vec, ValLo, I32Mask, InsertI64VL);
+      // If the source vector is undef don't pass along the tail elements from
+      // the previous slide1down.
+      SDValue Tail = Vec.isUndef() ? Vec : ValInVec;
+      ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,
+                             Tail, ValInVec, ValHi, I32Mask, InsertI64VL);
+      // Bitcast back to the right container type.
+      ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
+
+      if (!VecVT.isFixedLengthVector())
+        return ValInVec;
+      return convertFromScalableVector(VecVT, ValInVec, DAG, Subtarget);
+    }
+
     // First slide in the lo value, then the hi in above it. We use slide1down
     // to avoid the register group overlap constraint of vslide1up.
     ValInVec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32ContainerVT,

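A note on the undef-source handling above: when the vector being inserted into is
undef (as in the i64-to-vector bitcast tests below, where the value is built from
scratch), there are no existing tail elements to preserve, so the lowering keeps a
tail-agnostic policy. In those tests the vsetivli stays "ta, ma" and only the
redundant vslideup/vsetivli pair goes away, e.g. from fixed-vectors-bitcast.ll:

    vsetivli zero, 2, e32, m1, ta, ma
    vslide1down.vx v8, v8, a0
    vslide1down.vx v8, v8, a1
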
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index 1d5715d632326..6dcf6ee78e76e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -509,9 +509,7 @@ define <4 x i16> @bitcast_i64_v4i16(i64 %a) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v4i16:
@@ -547,9 +545,7 @@ define <2 x i32> @bitcast_i64_v2i32(i64 %a) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v2i32:
@@ -585,9 +581,7 @@ define <1 x i64> @bitcast_i64_v1i64(i64 %a) {
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v9, 0
+; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v1i64:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
index b316c71a89685..5776bb75ef5d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
@@ -200,9 +200,7 @@ define <4 x half> @bitcast_i64_v4f16(i64 %a) {
 ; RV32-FP:       # %bb.0:
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-FP-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v9, 0
+; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v4f16:
@@ -219,9 +217,7 @@ define <2 x float> @bitcast_i64_v2f32(i64 %a) {
 ; RV32-FP:       # %bb.0:
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-FP-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v9, 0
+; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v2f32:
@@ -238,9 +234,7 @@ define <1 x double> @bitcast_i64_v1f64(i64 %a) {
 ; RV32-FP:       # %bb.0:
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-FP-NEXT:    vslide1down.vx v9, v8, a1
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v9, 0
+; RV32-FP-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v1f64:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index dde5bfb2c8bb3..3d3811520ad02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -144,11 +144,9 @@ define <2 x i64> @mgather_v2i64_align4(<2 x i64*> %ptrs, <2 x i1> %m, <2 x i64>
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    lw a2, 0(a1)
 ; RV32-NEXT:    lw a1, 4(a1)
-; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v8, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
-; RV32-NEXT:    vslideup.vi v9, v10, 0
+; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV32-NEXT:    vslide1down.vx v9, v9, a2
+; RV32-NEXT:    vslide1down.vx v9, v9, a1
 ; RV32-NEXT:    andi a0, a0, 2
 ; RV32-NEXT:    beqz a0, .LBB5_2
 ; RV32-NEXT:  .LBB5_4: # %cond.load1

diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
index 6328addd29ca6..65ca000b98ff6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -635,11 +635,9 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %
 define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
-; CHECK-NEXT:    vslide1down.vx v9, v8, a0
-; CHECK-NEXT:    vslide1down.vx v9, v9, a1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v9, 0
+; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
   ret <vscale x 1 x i64> %r
@@ -675,11 +673,9 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_idx(<vscale x 1 x i64> %v, i64 %elt
 define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
-; CHECK-NEXT:    vslide1down.vx v10, v8, a0
-; CHECK-NEXT:    vslide1down.vx v10, v10, a1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v10, 0
+; CHECK-NEXT:    vsetivli zero, 2, e32, m2, tu, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
   ret <vscale x 2 x i64> %r
@@ -715,11 +711,9 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_idx(<vscale x 2 x i64> %v, i64 %elt
 define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m4, ta, ma
-; CHECK-NEXT:    vslide1down.vx v12, v8, a0
-; CHECK-NEXT:    vslide1down.vx v12, v12, a1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v12, 0
+; CHECK-NEXT:    vsetivli zero, 2, e32, m4, tu, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
   ret <vscale x 4 x i64> %r
@@ -755,11 +749,9 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_idx(<vscale x 4 x i64> %v, i64 %elt
 define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
 ; CHECK-LABEL: insertelt_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, m8, ta, ma
-; CHECK-NEXT:    vslide1down.vx v16, v8, a0
-; CHECK-NEXT:    vslide1down.vx v16, v16, a1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, tu, ma
-; CHECK-NEXT:    vslideup.vi v8, v16, 0
+; CHECK-NEXT:    vsetivli zero, 2, e32, m8, tu, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
   %r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
   ret <vscale x 8 x i64> %r


        

