[llvm] [RISCV] Try to optimize `vp.splice` to `vslide1up`. (PR #144871)
Ming Yan via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 24 23:21:29 PDT 2025
https://github.com/NexMing updated https://github.com/llvm/llvm-project/pull/144871
>From adff330d667e6e74d61f98c0f7c634840ff433c0 Mon Sep 17 00:00:00 2001
From: yanming <ming.yan at terapines.com>
Date: Fri, 20 Jun 2025 11:05:33 +0800
Subject: [PATCH 1/2] Add testcases.
---
.../RISCV/rvv/fixed-vectors-vp-splice.ll | 57 ++++++++++++
llvm/test/CodeGen/RISCV/rvv/vp-splice.ll | 90 ++++++++++++++++++-
2 files changed, 143 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
index 7bf22247093f7..c2871a4b133e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
@@ -299,3 +299,60 @@ define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <
%v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
ret <8 x half> %v
}
+
+define <4 x i32> @test_vp_splice_v4i32_with_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_v4i32_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <4 x i32> poison, i32 %first, i32 0
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @test_vp_splice_v4i32_with_splat_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_v4i32_with_splat_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %ins = insertelement <4 x i32> poison, i32 %first, i32 0
+ %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %splat, <4 x i32> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2f32_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <4 x float> poison, float %first, i32 0
+ %v = call <4 x float> @llvm.experimental.vp.splice.nxv2f32(<4 x float> %va, <4 x float> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
+ ret <4 x float> %v
+}
+
+define <4 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <4 x half> %vb, <4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2f16_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <4 x half> poison, half %first, i32 0
+ %v = call <4 x half> @llvm.experimental.vp.splice.nxv2f16(<4 x half> %va, <4 x half> %vb, i32 0, <4 x i1> %mask, i32 1, i32 %evl)
+ ret <4 x half> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index 6008ea43e9158..b5d20002f961a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfh,+zvfbfmin -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfhmin,+zvfbfmin -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs \
+; RUN: < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs \
+; RUN: < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i64:
@@ -505,3 +505,85 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloa
%v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x bfloat> %v
}
+
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2i32_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <vscale x 2 x i32> poison, i32 %first, i32 0
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2i32_with_splat_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %ins = insertelement <vscale x 2 x i32> poison, i32 %first, i32 0
+ %splat = shufflevector <vscale x 2 x i32> %ins, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %splat, <vscale x 2 x i32> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2f32_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <vscale x 2 x float> poison, float %first, i32 0
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; ZVFH-LABEL: test_vp_splice_nxv2f16_with_firstelt:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfmv.s.f v9, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; ZVFH-NEXT: vslideup.vi v9, v8, 1, v0.t
+; ZVFH-NEXT: vmv1r.v v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: test_vp_splice_nxv2f16_with_firstelt:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.s.x v9, a1
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; ZVFHMIN-NEXT: vslideup.vi v9, v8, 1, v0.t
+; ZVFHMIN-NEXT: vmv1r.v v8, v9
+; ZVFHMIN-NEXT: ret
+ %va = insertelement <vscale x 2 x half> poison, half %first, i32 0
+ %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
+ ret <vscale x 2 x half> %v
+}
+
+define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_with_firstelt(bfloat %first, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_splice_nxv2bf16_with_firstelt:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.h a1, fa0
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v9, a1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %va = insertelement <vscale x 2 x bfloat> poison, bfloat %first, i32 0
+ %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
+ ret <vscale x 2 x bfloat> %v
+}
>From 9c8def0757e976050c4a94904ee0a68d46e6c157 Mon Sep 17 00:00:00 2001
From: yanming <ming.yan at terapines.com>
Date: Fri, 20 Jun 2025 11:08:52 +0800
Subject: [PATCH 2/2] [RISCV] Try to optimize `vp.splice` to `vslide1up`.
Fold (vp.splice (insert_elt poison, scalar, 0), vec, 0, mask, 1, vl)
to (vslide1up vec, scalar, mask, vl).
Fold (vp.splice (splat_vector scalar), vec, 0, mask, 1, vl)
to (vslide1up vec, scalar, mask, vl).
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 37 +++++++++++++++++++
.../RISCV/rvv/fixed-vectors-vp-splice.ll | 24 ++++--------
llvm/test/CodeGen/RISCV/rvv/vp-splice.ll | 36 ++++++------------
3 files changed, 57 insertions(+), 40 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 712f6154732a2..99aba902a5ea8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13241,6 +13241,8 @@ SDValue RISCVTargetLowering::lowerVPMergeMask(SDValue Op,
SDValue
RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
SelectionDAG &DAG) const {
+ using namespace SDPatternMatch;
+
SDLoc DL(Op);
SDValue Op1 = Op.getOperand(0);
@@ -13285,6 +13287,41 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
SplatZeroOp2, DAG.getUNDEF(ContainerVT), EVL2);
}
+ auto getVectorFirstEle = [](SDValue Vec) {
+ SDValue FirstEle;
+ if (sd_match(Vec, m_InsertElt(m_Value(), m_Value(FirstEle), m_Zero())))
+ return FirstEle;
+
+ if (Vec.getOpcode() == ISD::SPLAT_VECTOR ||
+ Vec.getOpcode() == ISD::BUILD_VECTOR)
+ return Vec.getOperand(0);
+
+ return SDValue();
+ };
+
+ if (!IsMaskVector && isNullConstant(Offset) && isOneConstant(EVL1))
+ if (auto FirstEle = getVectorFirstEle(Op->getOperand(0))) {
+ MVT EltVT = ContainerVT.getVectorElementType();
+ SDValue Result;
+ if ((EltVT == MVT::f16 && !Subtarget.hasVInstructionsF16()) ||
+ EltVT == MVT::bf16) {
+ EltVT = EltVT.changeTypeToInteger();
+ ContainerVT = ContainerVT.changeVectorElementType(EltVT);
+ FirstEle =
+ DAG.getAnyExtOrTrunc(DAG.getBitcast(EltVT, FirstEle), DL, XLenVT);
+ }
+ Result = DAG.getNode(EltVT.isFloatingPoint() ? RISCVISD::VFSLIDE1UP_VL
+ : RISCVISD::VSLIDE1UP_VL,
+ DL, ContainerVT, DAG.getUNDEF(ContainerVT), Op2,
+ FirstEle, Mask, EVL2);
+ Result = DAG.getBitcast(
+ ContainerVT.changeVectorElementType(VT.getVectorElementType()),
+ Result);
+ return VT.isFixedLengthVector()
+ ? convertFromScalableVector(VT, Result, DAG, Subtarget)
+ : Result;
+ }
+
int64_t ImmValue = cast<ConstantSDNode>(Offset)->getSExtValue();
SDValue DownOffset, UpOffset;
if (ImmValue >= 0) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
index c2871a4b133e9..d0562e2be346f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
@@ -303,10 +303,8 @@ define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <
define <4 x i32> @test_vp_splice_v4i32_with_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_v4i32_with_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <4 x i32> poison, i32 %first, i32 0
@@ -317,10 +315,8 @@ define <4 x i32> @test_vp_splice_v4i32_with_firstelt(i32 %first, <4 x i32> %vb,
define <4 x i32> @test_vp_splice_v4i32_with_splat_firstelt(i32 %first, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_v4i32_with_splat_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%ins = insertelement <4 x i32> poison, i32 %first, i32 0
@@ -332,10 +328,8 @@ define <4 x i32> @test_vp_splice_v4i32_with_splat_firstelt(i32 %first, <4 x i32>
define <4 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_nxv2f32_with_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <4 x float> poison, float %first, i32 0
@@ -346,10 +340,8 @@ define <4 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <4 x floa
define <4 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <4 x half> %vb, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_nxv2f16_with_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <4 x half> poison, half %first, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index b5d20002f961a..9c8c5da75ff7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -509,10 +509,8 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloa
define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_nxv2i32_with_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <vscale x 2 x i32> poison, i32 %first, i32 0
@@ -523,10 +521,8 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vsc
define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_nxv2i32_with_splat_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%ins = insertelement <vscale x 2 x i32> poison, i32 %first, i32 0
@@ -538,10 +534,8 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first
define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_splice_nxv2f32_with_firstelt:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <vscale x 2 x float> poison, float %first, i32 0
@@ -552,20 +546,16 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first,
define <vscale x 2 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; ZVFH-LABEL: test_vp_splice_nxv2f16_with_firstelt:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; ZVFH-NEXT: vfmv.s.f v9, fa0
-; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; ZVFH-NEXT: vslideup.vi v9, v8, 1, v0.t
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfslide1up.vf v9, v8, fa0, v0.t
; ZVFH-NEXT: vmv1r.v v8, v9
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: test_vp_splice_nxv2f16_with_firstelt:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT: vmv.s.x v9, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; ZVFHMIN-NEXT: vslideup.vi v9, v8, 1, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vslide1up.vx v9, v8, a1, v0.t
; ZVFHMIN-NEXT: vmv1r.v v8, v9
; ZVFHMIN-NEXT: ret
%va = insertelement <vscale x 2 x half> poison, half %first, i32 0
@@ -577,10 +567,8 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_with_firstelt(bfloat %firs
; CHECK-LABEL: test_vp_splice_nxv2bf16_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vslideup.vi v9, v8, 1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vslide1up.vx v9, v8, a1, v0.t
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%va = insertelement <vscale x 2 x bfloat> poison, bfloat %first, i32 0
More information about the llvm-commits
mailing list