[llvm] [RISCV] Combine concat_vectors of single element scalar_to_vector (PR #114366)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 30 23:01:50 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Luke Lau (lukel97)
Some nodes, when scalarized during vector type legalization, end up as a tree of concat_vectors with single-element scalar_to_vectors at the leaves, e.g.:
t102: v1f32 = scalar_to_vector t103
t99: v1f32 = scalar_to_vector t100
t98: v2f32 = concat_vectors t102, t99
t90: v1f32 = scalar_to_vector t91
t86: v1f32 = scalar_to_vector t87
t85: v2f32 = concat_vectors t90, t86
t79: v4f32 = concat_vectors t98, t85
We can get better lowering by combining this into a single build_vector, which avoids materializing an intermediate vector for each element.
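After the combine, the example above collapses to one node built directly from the scalar operands (a rough sketch; the node id is illustrative, the operand order follows the concat order):
tN: v4f32 = build_vector t103, t100, t91, t87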
I originally added this to the generic DAGCombiner, and while it gave a number of improvements on AArch64, it caused some AVX512 regressions that I couldn't explain.
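For context, this DAG shape shows up whenever a fixed-length vector op has to be scalarized element by element; the fpext_v8bf16 test in fixed-vectors-scalarized.ll (updated in the diff below) is a minimal reproducer, reproduced here as a standalone sketch:

```llvm
; On a subtarget that has to scalarize the bfloat elements (the configuration
; exercised by fixed-vectors-scalarized.ll), each extended element becomes a
; scalar_to_vector feeding a tree of concat_vectors, which this patch now
; turns into a build_vector.
define <8 x float> @fpext_v8bf16(<8 x bfloat> %x) {
  %y = fpext <8 x bfloat> %x to <8 x float>
  ret <8 x float> %y
}
```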
---
Patch is 98.23 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/114366.diff
6 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+34-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll (+5-39)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll (+21-25)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scalarized.ll (+48-56)
- (modified) llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll (+492-834)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr63596.ll (+30-45)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3b3f8772a08940..2b6c2c0a965679 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16487,13 +16487,40 @@ static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
}
+/// Combine a concat_vector of single element scalar_to_vectors to a
+/// build_vector.
+static SDValue
+performCONCAT_VECTORSOfSCALAR_TO_VECTORCombine(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+ if (VT.isScalableVector())
+ return SDValue();
+ SmallVector<SDValue> Worklist(reverse(N->ops()));
+ SmallVector<SDValue> Elts;
+ Elts.reserve(VT.getVectorNumElements());
+ while (!Worklist.empty()) {
+ SDValue Op = Worklist.pop_back_val();
+ bool SingleElt = Op.getValueType().getVectorNumElements() == 1;
+ if (SingleElt && Op.isUndef())
+ Elts.push_back(Op);
+ else if (SingleElt && Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
+ Elts.push_back(Op.getOperand(0));
+ else if (Op.getOpcode() == ISD::CONCAT_VECTORS)
+ for (SDValue Vec : reverse(Op->ops()))
+ Worklist.push_back(Vec);
+ else
+ return SDValue();
+ }
+ return DAG.getBuildVector(VT, SDLoc(N), Elts);
+}
+
// If we're concatenating a series of vector loads like
// concat_vectors (load v4i8, p+0), (load v4i8, p+n), (load v4i8, p+n*2) ...
// Then we can turn this into a strided load by widening the vector elements
// vlse32 p, stride=n
-static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
- const RISCVSubtarget &Subtarget,
- const RISCVTargetLowering &TLI) {
+static SDValue
+performCONCAT_VECTORSOfLoadCombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget,
+ const RISCVTargetLowering &TLI) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
@@ -17797,7 +17824,10 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
return V;
break;
case ISD::CONCAT_VECTORS:
- if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, *this))
+ if (SDValue V = performCONCAT_VECTORSOfSCALAR_TO_VECTORCombine(N, DAG))
+ return V;
+ if (SDValue V =
+ performCONCAT_VECTORSOfLoadCombine(N, DAG, Subtarget, *this))
return V;
break;
case ISD::INSERT_VECTOR_ELT:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
index af46849ae08719..7869037c8bd4e9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
@@ -17,53 +17,19 @@ define <4 x i1> @load_large_vector(ptr %p) {
; ZVE32X-NEXT: ld a0, 80(a0)
; ZVE32X-NEXT: xor a3, a3, a4
; ZVE32X-NEXT: snez a3, a3
-; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmv.s.x v8, a3
-; ZVE32X-NEXT: vand.vi v8, v8, 1
-; ZVE32X-NEXT: vmsne.vi v0, v8, 0
-; ZVE32X-NEXT: vmv.s.x v9, zero
-; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT: xor a1, a1, a2
; ZVE32X-NEXT: snez a1, a1
-; ZVE32X-NEXT: vmv.s.x v10, a1
-; ZVE32X-NEXT: vand.vi v10, v10, 1
-; ZVE32X-NEXT: vmsne.vi v0, v10, 0
-; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmv.v.i v10, 0
-; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
-; ZVE32X-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v11, v8, 1
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmsne.vi v0, v11, 0
+; ZVE32X-NEXT: vmv.v.x v8, a1
+; ZVE32X-NEXT: vslide1down.vx v8, v8, a3
; ZVE32X-NEXT: xor a1, a5, a6
; ZVE32X-NEXT: snez a1, a1
-; ZVE32X-NEXT: vmv.s.x v8, a1
-; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vand.vi v8, v8, 1
-; ZVE32X-NEXT: vmsne.vi v8, v8, 0
-; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
-; ZVE32X-NEXT: vmv1r.v v0, v8
-; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
-; ZVE32X-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v11, v8, 2
-; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmsne.vi v0, v11, 0
+; ZVE32X-NEXT: vslide1down.vx v8, v8, a1
; ZVE32X-NEXT: xor a0, a7, a0
; ZVE32X-NEXT: snez a0, a0
-; ZVE32X-NEXT: vmv.s.x v8, a0
-; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ZVE32X-NEXT: vslide1down.vx v8, v8, a0
; ZVE32X-NEXT: vand.vi v8, v8, 1
-; ZVE32X-NEXT: vmsne.vi v8, v8, 0
-; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmerge.vim v10, v10, 1, v0
-; ZVE32X-NEXT: vmv1r.v v0, v8
-; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
-; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vslideup.vi v10, v8, 3
-; ZVE32X-NEXT: vmsne.vi v0, v10, 0
+; ZVE32X-NEXT: vmsne.vi v0, v8, 0
; ZVE32X-NEXT: ret
;
; ZVE64X-LABEL: load_large_vector:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index a445c8fe081725..81679f555dfec8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -220,24 +220,22 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x ptr> %ptrs, <2
;
; RV32ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV32ZVE32F: # %bb.0:
-; RV32ZVE32F-NEXT: lw a1, 8(a0)
-; RV32ZVE32F-NEXT: lw a0, 0(a0)
+; RV32ZVE32F-NEXT: lw a1, 0(a0)
+; RV32ZVE32F-NEXT: lw a0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVE32F-NEXT: vmv.s.x v9, a1
-; RV32ZVE32F-NEXT: vmv.s.x v10, a0
-; RV32ZVE32F-NEXT: vslideup.vi v10, v9, 1
-; RV32ZVE32F-NEXT: vsoxei32.v v10, (zero), v8, v0.t
+; RV32ZVE32F-NEXT: vmv.v.x v9, a1
+; RV32ZVE32F-NEXT: vslide1down.vx v9, v9, a0
+; RV32ZVE32F-NEXT: vsoxei32.v v9, (zero), v8, v0.t
; RV32ZVE32F-NEXT: ret
;
; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a1
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
+; RV64ZVE32F-NEXT: vmv.v.x v8, a0
; RV64ZVE32F-NEXT: vmv.x.s a0, v0
-; RV64ZVE32F-NEXT: andi a1, a0, 1
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: bnez a1, .LBB4_3
+; RV64ZVE32F-NEXT: andi a4, a0, 1
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: bnez a4, .LBB4_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB4_4
@@ -736,26 +734,24 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, <2
;
; RV32ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV32ZVE32F: # %bb.0:
-; RV32ZVE32F-NEXT: lw a1, 8(a0)
-; RV32ZVE32F-NEXT: lw a0, 0(a0)
+; RV32ZVE32F-NEXT: lw a1, 0(a0)
+; RV32ZVE32F-NEXT: lw a0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.s.x v9, a1
-; RV32ZVE32F-NEXT: vmv.s.x v10, a0
-; RV32ZVE32F-NEXT: vslideup.vi v10, v9, 1
-; RV32ZVE32F-NEXT: vsoxei32.v v10, (zero), v8, v0.t
+; RV32ZVE32F-NEXT: vmv.v.x v9, a1
+; RV32ZVE32F-NEXT: vslide1down.vx v9, v9, a0
+; RV32ZVE32F-NEXT: vsoxei32.v v9, (zero), v8, v0.t
; RV32ZVE32F-NEXT: ret
;
; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64ZVE32F-NEXT: vmv.s.x v9, a1
-; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV64ZVE32F-NEXT: vmv.x.s a0, v0
-; RV64ZVE32F-NEXT: andi a1, a0, 1
; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: bnez a1, .LBB13_3
+; RV64ZVE32F-NEXT: vmv.v.x v8, a0
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vmv.x.s a0, v0
+; RV64ZVE32F-NEXT: andi a4, a0, 1
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: bnez a4, .LBB13_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB13_4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scalarized.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scalarized.ll
index 4621f339ca8828..eaa7339ee7eab5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scalarized.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-scalarized.ll
@@ -5,42 +5,38 @@
define <8 x float> @fpext_v8bf16(<8 x bfloat> %x) {
; CHECK-LABEL: fpext_v8bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.w a0, fa0
-; CHECK-NEXT: fmv.x.w a1, fa1
-; CHECK-NEXT: fmv.x.w a2, fa2
-; CHECK-NEXT: fmv.x.w a3, fa3
-; CHECK-NEXT: fmv.x.w a4, fa4
-; CHECK-NEXT: fmv.x.w a5, fa5
-; CHECK-NEXT: fmv.x.w a6, fa6
-; CHECK-NEXT: fmv.x.w a7, fa7
+; CHECK-NEXT: fmv.x.w a0, fa7
+; CHECK-NEXT: fmv.x.w a1, fa6
+; CHECK-NEXT: fmv.x.w a2, fa5
+; CHECK-NEXT: fmv.x.w a3, fa4
+; CHECK-NEXT: fmv.x.w a4, fa3
+; CHECK-NEXT: fmv.x.w a5, fa2
+; CHECK-NEXT: fmv.x.w a6, fa0
+; CHECK-NEXT: fmv.x.w a7, fa1
; CHECK-NEXT: slli a7, a7, 16
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a7
+; CHECK-NEXT: fmv.w.x fa5, a7
; CHECK-NEXT: slli a6, a6, 16
-; CHECK-NEXT: vmv.s.x v9, a6
-; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a6
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a5, a5, 16
-; CHECK-NEXT: vmv.s.x v8, a5
+; CHECK-NEXT: fmv.w.x fa5, a5
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a4, a4, 16
-; CHECK-NEXT: vmv.s.x v10, a4
-; CHECK-NEXT: vslideup.vi v10, v8, 1
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 2
+; CHECK-NEXT: fmv.w.x fa5, a4
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a3, a3, 16
-; CHECK-NEXT: vmv.s.x v8, a3
+; CHECK-NEXT: fmv.w.x fa5, a3
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a2, a2, 16
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: fmv.w.x fa5, a2
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a1, a1, 16
-; CHECK-NEXT: vmv.s.x v11, a1
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vslideup.vi v8, v11, 1
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
+; CHECK-NEXT: fmv.w.x fa5, a0
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: ret
%y = fpext <8 x bfloat> %x to <8 x float>
ret <8 x float> %y
@@ -49,42 +45,38 @@ define <8 x float> @fpext_v8bf16(<8 x bfloat> %x) {
define <8 x float> @fpext_v8f16(<8 x bfloat> %x) {
; CHECK-LABEL: fpext_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.w a0, fa0
-; CHECK-NEXT: fmv.x.w a1, fa1
-; CHECK-NEXT: fmv.x.w a2, fa2
-; CHECK-NEXT: fmv.x.w a3, fa3
-; CHECK-NEXT: fmv.x.w a4, fa4
-; CHECK-NEXT: fmv.x.w a5, fa5
-; CHECK-NEXT: fmv.x.w a6, fa6
-; CHECK-NEXT: fmv.x.w a7, fa7
+; CHECK-NEXT: fmv.x.w a0, fa7
+; CHECK-NEXT: fmv.x.w a1, fa6
+; CHECK-NEXT: fmv.x.w a2, fa5
+; CHECK-NEXT: fmv.x.w a3, fa4
+; CHECK-NEXT: fmv.x.w a4, fa3
+; CHECK-NEXT: fmv.x.w a5, fa2
+; CHECK-NEXT: fmv.x.w a6, fa0
+; CHECK-NEXT: fmv.x.w a7, fa1
; CHECK-NEXT: slli a7, a7, 16
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a7
+; CHECK-NEXT: fmv.w.x fa5, a7
; CHECK-NEXT: slli a6, a6, 16
-; CHECK-NEXT: vmv.s.x v9, a6
-; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a6
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a5, a5, 16
-; CHECK-NEXT: vmv.s.x v8, a5
+; CHECK-NEXT: fmv.w.x fa5, a5
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a4, a4, 16
-; CHECK-NEXT: vmv.s.x v10, a4
-; CHECK-NEXT: vslideup.vi v10, v8, 1
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 2
+; CHECK-NEXT: fmv.w.x fa5, a4
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a3, a3, 16
-; CHECK-NEXT: vmv.s.x v8, a3
+; CHECK-NEXT: fmv.w.x fa5, a3
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a2, a2, 16
-; CHECK-NEXT: vmv.s.x v9, a2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
+; CHECK-NEXT: fmv.w.x fa5, a2
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a1, a1, 16
-; CHECK-NEXT: vmv.s.x v11, a1
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: slli a0, a0, 16
-; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vslideup.vi v8, v11, 1
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 4
+; CHECK-NEXT: fmv.w.x fa5, a0
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: ret
%y = fpext <8 x bfloat> %x to <8 x float>
ret <8 x float> %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 5a1f7f54305846..832675934e2f88 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -432,59 +432,45 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s1, -24
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a2, a1, 1
-; CHECK-V-NEXT: add a1, a2, a1
+; CHECK-V-NEXT: slli a1, a1, 1
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
; CHECK-V-NEXT: lhu s0, 0(a0)
-; CHECK-V-NEXT: lhu s1, 8(a0)
-; CHECK-V-NEXT: lhu s2, 16(a0)
-; CHECK-V-NEXT: lhu a0, 24(a0)
-; CHECK-V-NEXT: fmv.w.x fa0, a0
+; CHECK-V-NEXT: lhu a1, 8(a0)
+; CHECK-V-NEXT: lhu s1, 16(a0)
+; CHECK-V-NEXT: lhu s2, 24(a0)
+; CHECK-V-NEXT: fmv.w.x fa0, a1
; CHECK-V-NEXT: call __extendhfsf2
-; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: fmv.w.x fa5, s0
+; CHECK-V-NEXT: fcvt.l.s s0, fa0, rtz
+; CHECK-V-NEXT: fmv.s fa0, fa5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vmv.v.x v8, a0
+; CHECK-V-NEXT: vslide1down.vx v8, v8, s0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a1, sp, 16
+; CHECK-V-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vslide1down.vx v8, v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: fmv.w.x fa0, s2
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v10, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
-; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: addi a1, sp, 16
+; CHECK-V-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT: vslideup.vi v10, v8, 2
+; CHECK-V-NEXT: vslide1down.vx v10, v8, a0
; CHECK-V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: slli a1, a0, 1
-; CHECK-V-NEXT: add a0, a1, a0
+; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add sp, sp, a0
; CHECK-V-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-V-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
@@ -595,59 +581,45 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: .cfi_offset s1, -24
; CHECK-V-NEXT: .cfi_offset s2, -32
; CHECK-V-NEXT: csrr a1, vlenb
-; CHECK-V-NEXT: slli a2, a1, 1
-; CHECK-V-NEXT: add a1, a2, a1
+; CHECK-V-NEXT: slli a1, a1, 1
; CHECK-V-NEXT: sub sp, sp, a1
-; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
; CHECK-V-NEXT: lhu s0, 0(a0)
-; CHECK-V-NEXT: lhu s1, 8(a0)
-; CHECK-V-NEXT: lhu s2, 16(a0)
-; CHECK-V-NEXT: lhu a0, 24(a0)
-; CHECK-V-NEXT: fmv.w.x fa0, a0
+; CHECK-V-NEXT: lhu a1, 8(a0)
+; CHECK-V-NEXT: lhu s1, 16(a0)
+; CHECK-V-NEXT: lhu s2, 24(a0)
+; CHECK-V-NEXT: fmv.w.x fa0, a1
; CHECK-V-NEXT: call __extendhfsf2
-; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: fmv.w.x fa0, s2
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
-; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-V-NEXT: fmv.w.x fa5, s0
+; CHECK-V-NEXT: fcvt.lu.s s0, fa0, rtz
+; CHECK-V-NEXT: fmv.s fa0, fa5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vmv.v.x v8, a0
+; CHECK-V-NEXT: vslide1down.vx v8, v8, s0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: csrr a0, vlenb
-; CHECK-V-NEXT: add a0, sp, a0
-; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: fmv.w.x fa0, s0
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT: vmv.s.x v8, a0
+; CHECK-V-NEXT: addi a1, sp, 16
+; CHECK-V-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-V-NEXT: vslide1down.vx v8, v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
-; CHECK-V...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/114366