[llvm] [RISCV] Lower VP_SELECT constant false to use vmerge.vxm/vmerge.vim (PR #144461)

Liao Chunyu via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 17 21:18:24 PDT 2025


https://github.com/ChunyuLiao updated https://github.com/llvm/llvm-project/pull/144461

>From 3151db1ab319a014579d5b96b77c7f5e44789319 Mon Sep 17 00:00:00 2001
From: Liao Chunyu <chunyu at iscas.ac.cn>
Date: Mon, 16 Jun 2025 21:40:22 -0400
Subject: [PATCH] [RISCV] Combine VP_SELECT constant false to use
 vmerge.vxm/vmerge.vim

Currently, when the false path of a vp_select is a splat vector,
it is lowered to a vmv_v_x/vmv_v_i. MachineLICM hoists the vmv out
of the loop, but a whole-register copy remains in the loop body.

By inverting the mask register and swapping the true and false values
in the vp_select, we can eliminate some instructions inside the loop.

current: https://godbolt.org/z/EnGMn3xeM
expected (similar form): https://godbolt.org/z/nWhGM6Ej5
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 30 +++++++++++++------
 .../test/CodeGen/RISCV/rvv/masked-load-int.ll |  6 ++--
 llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll     |  6 ++--
 3 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e670567bd1844..210e0a6241d3c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1628,15 +1628,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
   if (Subtarget.hasVInstructions())
     setTargetDAGCombine(
-        {ISD::FCOPYSIGN,    ISD::MGATHER,      ISD::MSCATTER,
-         ISD::VP_GATHER,    ISD::VP_SCATTER,   ISD::SRA,
-         ISD::SRL,          ISD::SHL,          ISD::STORE,
-         ISD::SPLAT_VECTOR, ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
-         ISD::VP_STORE,     ISD::VP_TRUNCATE,  ISD::EXPERIMENTAL_VP_REVERSE,
-         ISD::MUL,          ISD::SDIV,         ISD::UDIV,
-         ISD::SREM,         ISD::UREM,         ISD::INSERT_VECTOR_ELT,
-         ISD::ABS,          ISD::CTPOP,        ISD::VECTOR_SHUFFLE,
-         ISD::VSELECT,      ISD::VECREDUCE_ADD});
+        {ISD::FCOPYSIGN,    ISD::MGATHER,       ISD::MSCATTER,
+         ISD::VP_GATHER,    ISD::VP_SCATTER,    ISD::SRA,
+         ISD::SRL,          ISD::SHL,           ISD::STORE,
+         ISD::SPLAT_VECTOR, ISD::BUILD_VECTOR,  ISD::CONCAT_VECTORS,
+         ISD::VP_STORE,     ISD::VP_TRUNCATE,   ISD::EXPERIMENTAL_VP_REVERSE,
+         ISD::MUL,          ISD::SDIV,          ISD::UDIV,
+         ISD::SREM,         ISD::UREM,          ISD::INSERT_VECTOR_ELT,
+         ISD::ABS,          ISD::CTPOP,         ISD::VECTOR_SHUFFLE,
+         ISD::VSELECT,      ISD::VECREDUCE_ADD, ISD::VP_SELECT});
 
   if (Subtarget.hasVendorXTHeadMemPair())
     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
@@ -19725,6 +19725,18 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return performSELECTCombine(N, DAG, Subtarget);
   case ISD::VSELECT:
     return performVSELECTCombine(N, DAG);
+  case ISD::VP_SELECT: {
+    if (SDValue Op2 = N->getOperand(2);
+        Op2.hasOneUse() && (Op2.getOpcode() == ISD::SPLAT_VECTOR ||
+                            Op2.getOpcode() == ISD::SPLAT_VECTOR_PARTS)) {
+      SDLoc DL(N);
+      SDValue Op0 = N->getOperand(0);
+      SDValue Val = DAG.getLogicalNOT(DL, Op0, Op0.getValueType());
+      return DAG.getNode(ISD::VP_SELECT, DL, N->getValueType(0), Val,
+                         N->getOperand(2), N->getOperand(1), N->getOperand(3));
+    }
+    return SDValue();
+  }
   case RISCVISD::CZERO_EQZ:
   case RISCVISD::CZERO_NEZ: {
     SDValue Val = N->getOperand(0);
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
index 75537406f3515..a9ed70b94c90f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -34,10 +34,10 @@ define <vscale x 1 x i8> @masked_load_passthru_nxv1i8(ptr %a, <vscale x 1 x i1>
 ; ZVE32:       # %bb.0:
 ; ZVE32-NEXT:    csrr a1, vlenb
 ; ZVE32-NEXT:    srli a1, a1, 3
-; ZVE32-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
-; ZVE32-NEXT:    vmv.v.i v8, 0
-; ZVE32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; ZVE32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; ZVE32-NEXT:    vle8.v v8, (a0), v0.t
+; ZVE32-NEXT:    vmnot.m v0, v0
+; ZVE32-NEXT:    vmerge.vim v8, v8, 0, v0
 ; ZVE32-NEXT:    ret
   %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(ptr %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> zeroinitializer)
   ret <vscale x 1 x i8> %load
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 522c83fd9fa99..3918a8009fde8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -483,10 +483,10 @@ define <vscale x 2 x i64> @select_nxv2i64_constant_true(<vscale x 2 x i1> %a, <v
 define <vscale x 2 x i64> @select_nxv2i64_constant_false(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, i32 zeroext %evl) {
 ; CHECK-LABEL: select_nxv2i64_constant_false:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a1, 100
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vmv.v.x v10, a1
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmnot.m v0, v0
+; CHECK-NEXT:    li a0, 100
+; CHECK-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> splat (i64 100), i32 %evl)
   ret <vscale x 2 x i64> %v



More information about the llvm-commits mailing list