[llvm] 50f9b34 - [RISCV] Prefer vmv.s.x for build_vector a, undef, ..., undef (#136164)

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Apr 17 19:51:38 PDT 2025


Author: Philip Reames
Date: 2025-04-17T19:51:35-07:00
New Revision: 50f9b34b5340cfb32d14920bb0d41a90f48ffc40

URL: https://github.com/llvm/llvm-project/commit/50f9b34b5340cfb32d14920bb0d41a90f48ffc40
DIFF: https://github.com/llvm/llvm-project/commit/50f9b34b5340cfb32d14920bb0d41a90f48ffc40.diff

LOG: [RISCV] Prefer vmv.s.x for build_vector a, undef, ..., undef (#136164)

If we have a build_vector which could be lowered as either a splat or a
scalar insert, prefer the scalar insert. At high LMUL, this reduces both
vector register pressure (only locally, since the use will likely still
require an aligned register group) and the amount of work performed for
the splat.
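
As an illustrative sketch (reconstructed in the spirit of the
binop-splats tests below, not copied from the patch), consider:

    define <4 x i32> @splat_add(i32 %x, i32 %y) {
      %xh = insertelement <4 x i32> poison, i32 %x, i64 0
      %xs = shufflevector <4 x i32> %xh, <4 x i32> poison, <4 x i32> zeroinitializer
      %yh = insertelement <4 x i32> poison, i32 %y, i64 0
      %ys = shufflevector <4 x i32> %yh, <4 x i32> poison, <4 x i32> zeroinitializer
      %s  = add <4 x i32> %xs, %ys
      ret <4 x i32> %s
    }

After DAG combines, only lane 0 of the intermediate vectors is demanded
(the result is rebroadcast with vrgather.vi at the end), so the
BUILD_VECTOR feeding the add has every operand but the first undef and
can now be lowered with vmv.s.x instead of vmv.v.x.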

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
    llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
    llvm/test/CodeGen/RISCV/rvv/pr125306.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5d4df039a0ed8..98c8bdb4bc114 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4208,8 +4208,22 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
       return Gather;
-    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
-                                        : RISCVISD::VMV_V_X_VL;
+
+    // Prefer vmv.s.x/vfmv.s.f if legal to reduce work and register
+    // pressure at high LMUL.
+    if (all_of(Op->ops().drop_front(),
+               [](const SDUse &U) { return U.get().isUndef(); })) {
+      unsigned Opc =
+          VT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
+      if (!VT.isFloatingPoint())
+        Splat = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Splat);
+      Splat = DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+                          Splat, VL);
+      return convertFromScalableVector(VT, Splat, DAG, Subtarget);
+    }
+
+    unsigned Opc =
+        VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
     if (!VT.isFloatingPoint())
       Splat = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Splat);
     Splat =

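The guard that enables the new path is the all_of over
Op->ops().drop_front(): BuildVectorSDNode::getSplatValue() treats undef
lanes as compatible with the splat value, so a BUILD_VECTOR whose only
defined element is lane 0 still reports a splat. Pulled out as a
standalone sketch (the helper name is hypothetical, not part of the
patch):

    #include "llvm/ADT/STLExtras.h"             // llvm::all_of
    #include "llvm/CodeGen/SelectionDAGNodes.h" // SDNode, SDUse, SDValue

    // True iff every operand after the first is undef, i.e. only lane 0
    // of the BUILD_VECTOR carries a defined value.
    static bool isLaneZeroInsert(const llvm::SDNode *N) {
      return llvm::all_of(N->ops().drop_front(),
                          [](const llvm::SDUse &U) { return U.get().isUndef(); });
    }

When this holds, writing only element 0 with vmv.s.x/vfmv.s.f suffices;
at LMUL > 1 that touches a single vector register instead of the whole
register group.
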
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
index bfc43db2e369e..6b5ca5f391b80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-binop-splats.ll
@@ -187,7 +187,7 @@ define <2 x i8> @v2i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -203,7 +203,7 @@ define <4 x i8> @v4i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -219,7 +219,7 @@ define <8 x i8> @v8i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -235,7 +235,7 @@ define <16 x i8> @v16i8(i8 %x, i8 %y) {
 ; CHECK-LABEL: v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -252,7 +252,7 @@ define <32 x i8> @v32i8(i8 %x, i8 %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v10, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v10, 0
 ; CHECK-NEXT:    ret
@@ -269,7 +269,7 @@ define <64 x i8> @v64i8(i8 %x, i8 %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 64
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v12, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v12, 0
 ; CHECK-NEXT:    ret
@@ -300,7 +300,7 @@ define <2 x i16> @v2i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -316,7 +316,7 @@ define <4 x i16> @v4i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -332,7 +332,7 @@ define <8 x i16> @v8i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -348,7 +348,7 @@ define <16 x i16> @v16i16(i16 %x, i16 %y) {
 ; CHECK-LABEL: v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v10, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v10, 0
 ; CHECK-NEXT:    ret
@@ -365,7 +365,7 @@ define <32 x i16> @v32i16(i16 %x, i16 %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v12, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v12, 0
 ; CHECK-NEXT:    ret
@@ -396,7 +396,7 @@ define <2 x i32> @v2i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -412,7 +412,7 @@ define <4 x i32> @v4i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v9, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -428,7 +428,7 @@ define <8 x i32> @v8i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v10, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v10, 0
 ; CHECK-NEXT:    ret
@@ -444,7 +444,7 @@ define <16 x i32> @v16i32(i32 %x, i32 %y) {
 ; CHECK-LABEL: v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vadd.vx v12, v8, a1
 ; CHECK-NEXT:    vrgather.vi v8, v12, 0
 ; CHECK-NEXT:    ret
@@ -509,7 +509,7 @@ define <2 x i64> @v2i64(i64 %x, i64 %y) {
 ; RV64-LABEL: v2i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vadd.vx v9, v8, a1
 ; RV64-NEXT:    vrgather.vi v8, v9, 0
 ; RV64-NEXT:    ret
@@ -542,7 +542,7 @@ define <4 x i64> @v4i64(i64 %x, i64 %y) {
 ; RV64-LABEL: v4i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vadd.vx v10, v8, a1
 ; RV64-NEXT:    vrgather.vi v8, v10, 0
 ; RV64-NEXT:    ret
@@ -575,7 +575,7 @@ define <8 x i64> @v8i64(i64 %x, i64 %y) {
 ; RV64-LABEL: v8i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vadd.vx v12, v8, a1
 ; RV64-NEXT:    vrgather.vi v8, v12, 0
 ; RV64-NEXT:    ret
@@ -591,7 +591,7 @@ define <4 x half> @v4f16(half %x, half %y) {
 ; CHECK-LABEL: v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    vfadd.vf v9, v8, fa1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret
@@ -607,7 +607,7 @@ define <2 x float> @v2f32(float %x, float %y) {
 ; CHECK-LABEL: v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.v.f v8, fa0
+; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    vfadd.vf v9, v8, fa1
 ; CHECK-NEXT:    vrgather.vi v8, v9, 0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
index dbbb8362144ca..c97545691180e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
@@ -439,7 +439,7 @@ define void @buggy(i32 %0) #0 {
 ; RV32-LABEL: buggy:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vmv.s.x v8, a0
 ; RV32-NEXT:    vadd.vv v8, v8, v8
 ; RV32-NEXT:    vor.vi v8, v8, 1
 ; RV32-NEXT:    vrgather.vi v9, v8, 0
@@ -450,7 +450,7 @@ define void @buggy(i32 %0) #0 {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    slli a0, a0, 1
 ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vmv.s.x v8, a0
 ; RV64-NEXT:    vor.vi v8, v8, 1
 ; RV64-NEXT:    vrgather.vi v9, v8, 0
 ; RV64-NEXT:    vse32.v v9, (zero)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
index a171a7f8ac5f1..a29d53b50287b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll
@@ -250,7 +250,7 @@ define <4 x double> @vslide1up_4xf64(<4 x double> %v, double %b) {
 ; CHECK-LABEL: vslide1up_4xf64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT:    vfmv.v.f v10, fa0
+; CHECK-NEXT:    vfmv.s.f v10, fa0
 ; CHECK-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 29fbb8acc3358..b6253c6ea63b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -500,7 +500,7 @@ define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwi
 ; RV32-SLOW-NEXT:    or a4, a6, a5
 ; RV32-SLOW-NEXT:    or a3, a4, a3
 ; RV32-SLOW-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV32-SLOW-NEXT:    vmv.v.x v8, a3
+; RV32-SLOW-NEXT:    vmv.s.x v8, a3
 ; RV32-SLOW-NEXT:  .LBB8_2: # %else
 ; RV32-SLOW-NEXT:    andi a2, a2, 2
 ; RV32-SLOW-NEXT:    beqz a2, .LBB8_4
@@ -544,7 +544,7 @@ define void @masked_load_v2i32_align1(ptr %a, <2 x i32> %m, ptr %res_ptr) nounwi
 ; RV64-SLOW-NEXT:    or a4, a6, a5
 ; RV64-SLOW-NEXT:    or a3, a4, a3
 ; RV64-SLOW-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; RV64-SLOW-NEXT:    vmv.v.x v8, a3
+; RV64-SLOW-NEXT:    vmv.s.x v8, a3
 ; RV64-SLOW-NEXT:  .LBB8_2: # %else
 ; RV64-SLOW-NEXT:    andi a2, a2, 2
 ; RV64-SLOW-NEXT:    beqz a2, .LBB8_4

diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
index 919c2fd518578..19ea7b7ca481e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-scalar-load-crash.ll
@@ -9,8 +9,8 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a3, a2, 1
 ; RV32-NEXT:    th.lbib a4, (a1), -1, 0
-; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a4
+; RV32-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV32-NEXT:    vmv.s.x v8, a4
 ; RV32-NEXT:    vmv.s.x v9, zero
 ; RV32-NEXT:    vsetvli zero, a3, e8, mf2, tu, ma
 ; RV32-NEXT:    vslideup.vx v8, v9, a2
@@ -35,8 +35,8 @@ define i32 @test(i32 %size, ptr %add.ptr, i64 %const) {
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a3, a2, 1
 ; RV64-NEXT:    th.lbib a4, (a1), -1, 0
-; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a4
+; RV64-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v8, a4
 ; RV64-NEXT:    vmv.s.x v9, zero
 ; RV64-NEXT:    vsetvli zero, a3, e8, mf2, tu, ma
 ; RV64-NEXT:    vslideup.vx v8, v9, a2

diff --git a/llvm/test/CodeGen/RISCV/rvv/pr125306.ll b/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
index 111f87de220db..9400c381bc87c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr125306.ll
@@ -60,7 +60,7 @@ define <2 x i32> @main(ptr %0) {
 ; CHECK-NEXT:    vslide1down.vx v8, v8, zero
 ; CHECK-NEXT:    vslide1down.vx v10, v10, zero
 ; CHECK-NEXT:    vmin.vv v8, v10, v8
-; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vslide1down.vx v11, v11, zero
 ; CHECK-NEXT:    vmin.vx v10, v10, a2
 ; CHECK-NEXT:    vmin.vx v10, v10, a1

