[llvm] [RISCV] Use LMUL=1 for vmv_s_x_vl with non-undef passthru (PR #66659)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 28 06:32:21 PDT 2023


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/66659

From 21281af453d263da5e3d7a868feda33338d7d04d Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 18 Sep 2023 16:46:45 +0100
Subject: [PATCH] [RISCV] Use LMUL=1 for vmv_s_x_vl with non-undef passthru

We currently shrink the type of vmv_s_x_vl nodes to LMUL=1 when the
passthru is undef, to avoid over-constraining the register allocator,
since it ignores LMUL. This patch relaxes the combine to also handle
non-undef passthrus, which occur when lowering insert_vector_elt: the
LMUL=1 subvector of the passthru is extracted, the vmv_s_x_vl is
performed on it, and the result is inserted back into the original
vector.
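
For illustration, here is a hand-written sketch (not a test from this
patch) of the insert_vector_elt case; register and vsetvli details are
indicative only:

    ; Inserting into element 0 of a non-undef <vscale x 4 x i32> (an m2
    ; type) lowers to a vmv_s_x_vl whose passthru is %v.
    define <vscale x 4 x i32> @insert_elt0(<vscale x 4 x i32> %v, i32 %x) {
      %r = insertelement <vscale x 4 x i32> %v, i32 %x, i64 0
      ret <vscale x 4 x i32> %r
    }

Before this patch the combine bailed out on the non-undef passthru and
the vmv.s.x was selected at e32/m2, constraining the whole v8-v9
register group; with it, only the LMUL=1 subregister v8 is constrained,
matching the m2 -> m1 vsetvli changes in the tests below.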
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 11 ++++++----
 llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll | 18 ++++++++--------
 llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll | 24 ++++++++++-----------
 3 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5e3975df1c4425d..fb7b353a857b999 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14598,11 +14598,14 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     // Use M1 or smaller to avoid over constraining register allocation
     const MVT M1VT = getLMUL1VT(VT);
-    if (M1VT.bitsLT(VT) && Passthru.isUndef()) {
+    if (M1VT.bitsLT(VT)) {
+      SDValue M1Passthru =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, M1VT, Passthru,
+                      DAG.getVectorIdxConstant(0, DL));
       SDValue Result =
-        DAG.getNode(N->getOpcode(), DL, M1VT, Passthru, Scalar, VL);
-      Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
-                           Result, DAG.getConstant(0, DL, XLenVT));
+          DAG.getNode(N->getOpcode(), DL, M1VT, M1Passthru, Scalar, VL);
+      Result = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Passthru, Result,
+                           DAG.getConstant(0, DL, XLenVT));
       return Result;
     }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
index 477b12c29b9ce7d..7c3e94ab71ff7d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
 define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
 define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
 define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
 define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
 define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
 define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
 define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
 define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
 define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
index 8bba3b841a3cfa2..79931acbdc06dbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll
@@ -58,7 +58,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i
 define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -71,7 +71,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i
 define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -84,7 +84,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i
 define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -136,7 +136,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16,
 define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -149,7 +149,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i1
 define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -162,7 +162,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i1
 define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,7 +201,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32,
 define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -214,7 +214,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32,
 define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -227,7 +227,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i3
 define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -253,7 +253,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64,
 define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -266,7 +266,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64,
 define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -279,7 +279,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64,
 define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:


