[llvm] 20cf77f - [LegalizeTypes][VP] Add widen and split support for vp.fptrunc and vp.fpext
Author: Lian Wang
Date: 2022-06-06T02:28:01Z
New Revision: 20cf77f776b199690acda7a17d59846526030fcf
URL: https://github.com/llvm/llvm-project/commit/20cf77f776b199690acda7a17d59846526030fcf
DIFF: https://github.com/llvm/llvm-project/commit/20cf77f776b199690acda7a17d59846526030fcf.diff
LOG: [LegalizeTypes][VP] Add widen and split support for vp.fptrunc and vp.fpext
Reviewed By: frasercrmck
Differential Revision: https://reviews.llvm.org/D126439
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
Removed:
################################################################################
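Splitting a VP (vector-predicated) node is slightly more involved than splitting its non-VP counterpart: the mask operand has to be split element-wise, and the explicit vector length (EVL) divided between the two halves. The SplitVecOp_FP_ROUND hunk below does this with SplitMask and DAG.SplitEVL, where SplitEVL amounts to an unsigned min for the low half and a saturating subtract for the high half. A minimal standalone sketch of that arithmetic (splitEVL here is a hypothetical stand-in, not the SelectionDAG API):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <utility>

// When a VP operation is split and its low half covers LoElts lanes,
// the low half processes min(EVL, LoElts) lanes and the high half the
// saturating remainder. This is the clamping pattern visible as the
// addi/bltu sequences in the RISC-V tests below.
static std::pair<uint64_t, uint64_t> splitEVL(uint64_t EVL, uint64_t LoElts) {
  uint64_t EVLLo = std::min(EVL, LoElts);
  uint64_t EVLHi = EVL > LoElts ? EVL - LoElts : 0; // saturating subtract
  return {EVLLo, EVLHi};
}

int main() {
  // Splitting a v32 operation into two v16 halves, as in
  // vfptrunc_v32f32_v32f64 below:
  for (uint64_t EVL : {5, 16, 23, 32}) {
    auto [Lo, Hi] = splitEVL(EVL, 16);
    std::printf("EVL=%2llu -> lo=%2llu hi=%2llu\n", (unsigned long long)EVL,
                (unsigned long long)Lo, (unsigned long long)Hi);
  }
}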
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 68b27b2bdb385..e4e6a72b956a7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1027,7 +1027,9 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FREEZE:
case ISD::ARITH_FENCE:
case ISD::FP_EXTEND:
+ case ISD::VP_FP_EXTEND:
case ISD::FP_ROUND:
+ case ISD::VP_FP_ROUND:
case ISD::FP_TO_SINT:
case ISD::VP_FPTOSI:
case ISD::FP_TO_UINT:
@@ -2696,6 +2698,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
Res = SplitVecOp_TruncateHelper(N);
break;
case ISD::STRICT_FP_ROUND:
+ case ISD::VP_FP_ROUND:
case ISD::FP_ROUND: Res = SplitVecOp_FP_ROUND(N); break;
case ISD::FCOPYSIGN: Res = SplitVecOp_FCOPYSIGN(N); break;
case ISD::STORE:
@@ -3572,6 +3575,13 @@ SDValue DAGTypeLegalizer::SplitVecOp_FP_ROUND(SDNode *N) {
SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
Lo.getValue(1), Hi.getValue(1));
ReplaceValueWith(SDValue(N, 1), NewChain);
+ } else if (N->getOpcode() == ISD::VP_FP_ROUND) {
+ SDValue MaskLo, MaskHi, EVLLo, EVLHi;
+ std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
+ std::tie(EVLLo, EVLHi) =
+ DAG.SplitEVL(N->getOperand(2), N->getValueType(0), DL);
+ Lo = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Lo, MaskLo, EVLLo);
+ Hi = DAG.getNode(ISD::VP_FP_ROUND, DL, OutVT, Hi, MaskHi, EVLHi);
} else {
Lo = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Lo, N->getOperand(1));
Hi = DAG.getNode(ISD::FP_ROUND, DL, OutVT, Hi, N->getOperand(1));
@@ -3795,7 +3805,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::ANY_EXTEND:
case ISD::FP_EXTEND:
+ case ISD::VP_FP_EXTEND:
case ISD::FP_ROUND:
+ case ISD::VP_FP_ROUND:
case ISD::FP_TO_SINT:
case ISD::VP_FPTOSI:
case ISD::FP_TO_UINT:
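Note that the widen-path hunk above only adds the two VP opcodes to an existing case list. That is enough because a widened VP conversion can reuse its EVL operand unchanged: lanes at index >= EVL are undefined anyway, so only the source vector and mask need to grow to the wider legal type. A reference model of that semantic point (plain C++ illustration, not the legalizer's code; vpFPTrunc is an invented name):

#include <cstddef>
#include <cstdio>
#include <vector>

// Model of vp.fptrunc semantics: lane I is defined only when I < EVL
// and Mask[I] is set; all other lanes are unspecified (0.0f here).
std::vector<float> vpFPTrunc(const std::vector<double> &Src,
                             const std::vector<bool> &Mask, size_t EVL) {
  std::vector<float> Res(Src.size(), 0.0f);
  for (size_t I = 0; I < Src.size() && I < EVL; ++I)
    if (Mask[I])
      Res[I] = static_cast<float>(Src[I]);
  return Res;
}

int main() {
  std::vector<double> Src = {1.5, 2.5, 3.5};    // think "v3f64"
  std::vector<bool> Mask = {true, false, true};
  auto Narrow = vpFPTrunc(Src, Mask, /*EVL=*/2);
  Src.resize(4);                                // widen to "v4f64"
  Mask.resize(4);                               // padded lanes are false
  auto Wide = vpFPTrunc(Src, Mask, /*EVL=*/2);
  // Defined lanes agree, so widening source+mask with the same EVL is sound.
  for (size_t I = 0; I < Narrow.size(); ++I)
    std::printf("lane %zu: narrow=%g wide=%g\n", I, Narrow[I], Wide[I]);
}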
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
index c0bb5f89e708e..6b19011be6016 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
@@ -75,3 +75,48 @@ define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f32(<2 x float> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
ret <2 x double> %v
}
+
+declare <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float>, <15 x i1>, i32)
+
+define <15 x double> @vfpext_v15f32_v15f64(<15 x float> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_v15f32_v15f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %v = call <15 x double> @llvm.vp.fpext.v15f64.v15f32(<15 x float> %a, <15 x i1> %m, i32 %vl)
+ ret <15 x double> %v
+}
+
+declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float>, <32 x i1>, i32)
+
+define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_v32f32_v32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: bltu a0, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
+; CHECK-NEXT: vslidedown.vi v24, v8, 16
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+ %v = call <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float> %a, <32 x i1> %m, i32 %vl)
+ ret <32 x double> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
index 7657382ccf859..b4ea48173750a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
@@ -75,3 +75,62 @@ define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext %
%v = call <2 x float> @llvm.vp.fptrunc.v2f64.v2f32(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
ret <2 x float> %v
}
+
+declare <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double>, <15 x i1>, i32)
+
+define <15 x float> @vfptrunc_v15f32_v15f64(<15 x double> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_v15f32_v15f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %v = call <15 x float> @llvm.vp.fptrunc.v15f64.v15f32(<15 x double> %a, <15 x i1> %m, i32 %vl)
+ ret <15 x float> %v
+}
+
+declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i32)
+
+define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_v32f32_v32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 0
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: bltu a0, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT: li a0, 32
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT: vslideup.vi v16, v8, 16
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %v = call <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double> %a, <32 x i1> %m, i32 %vl)
+ ret <32 x float> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index 99df1d746ffec..f7bacfb5bdee4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -75,3 +75,48 @@ define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64_unmasked(<vscale x 2 x floa
%v = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x double> %v
}
+
+declare <vscale x 7 x double> @llvm.vp.fpext.nxv7f64.nxv7f32(<vscale x 7 x float>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfpext_nxv7f32_nxv7f64(<vscale x 7 x float> %a, <vscale x 7 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_nxv7f32_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x double> @llvm.vp.fpext.nxv7f64.nxv7f32(<vscale x 7 x float> %a, <vscale x 7 x i1> %m, i32 %vl)
+ ret <vscale x 7 x double> %v
+}
+
+declare <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfpext_nxv32f16_nxv32f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x float> @llvm.vp.fpext.nxv32f32.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x i1> %m, i32 %vl)
+ ret <vscale x 32 x float> %v
+}
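The scalable-vector fptrunc tests that follow split at half the type's minimum element count, so the runtime split point depends on VLEN; the csrr vlenb / slli sequences in the checks compute exactly that bound. A small sketch of the arithmetic (halfElements is an illustrative helper; it assumes LLVM's RISC-V convention vscale = VLEN/64):

#include <cstdio>

// A scalable type nxv<MinElts>xTy holds MinElts * vscale lanes, with
// vscale = VLEN/64 on RISC-V. halfElements returns the lane count of
// the low half when such a type is split down the middle.
unsigned halfElements(unsigned MinElts, unsigned VLEN) {
  return (MinElts / 2) * (VLEN / 64);
}

int main() {
  // Splitting nxv32f64 at nxv16f64, as in vfptrunc_nxv32f32_nxv32f64:
  for (unsigned VLEN : {128u, 256u, 512u}) {
    unsigned Vlenb = VLEN / 8; // what "csrr a1, vlenb" reads
    std::printf("VLEN=%3u: low half holds %3u lanes (= 2*vlenb = %3u)\n",
                VLEN, halfElements(32, VLEN), 2 * Vlenb);
  }
}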
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 47032c3cbb38c..0f4ea22a7822e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -75,3 +75,158 @@ define <vscale x 2 x float> @vfptrunc_nxv2f32_nxv2f64_unmasked(<vscale x 2 x dou
%v = call <vscale x 2 x float> @llvm.vp.fptrunc.nxv2f64.nxv2f32(<vscale x 2 x double> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x float> %v
}
+
+declare <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x float> @vfptrunc_nxv7f32_nxv7f64(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv7f32_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x float> @llvm.vp.fptrunc.nxv7f64.nxv7f32(<vscale x 7 x double> %a, <vscale x 7 x i1> %m, i32 %vl)
+ ret <vscale x 7 x float> %v
+}
+
+declare <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a4, a1, 3
+; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT: sub a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a4
+; CHECK-NEXT: bltu a0, a3, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: .LBB7_2:
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB7_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: .LBB7_4:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x float> @llvm.vp.fptrunc.nxv16f64.nxv16f32(<vscale x 16 x double> %a, <vscale x 16 x i1> %m, i32 %vl)
+ ret <vscale x 16 x float> %v
+}
+
+declare <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vfptrunc_nxv32f32_nxv32f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a4, a1, 1
+; CHECK-NEXT: srli a3, a1, 3
+; CHECK-NEXT: mv a5, a2
+; CHECK-NEXT: bltu a2, a4, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: mv a5, a4
+; CHECK-NEXT: .LBB8_2:
+; CHECK-NEXT: li a6, 0
+; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, mu
+; CHECK-NEXT: sub a7, a5, a1
+; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: bltu a5, a7, .LBB8_4
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: mv a6, a7
+; CHECK-NEXT: .LBB8_4:
+; CHECK-NEXT: srli a7, a1, 2
+; CHECK-NEXT: slli t0, a1, 3
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; CHECK-NEXT: bltu a5, a1, .LBB8_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: mv a5, a1
+; CHECK-NEXT: .LBB8_6:
+; CHECK-NEXT: li a6, 0
+; CHECK-NEXT: vsetvli t1, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vslidedown.vx v1, v24, a7
+; CHECK-NEXT: add a7, a0, t0
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT: sub a4, a2, a4
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT: bltu a2, a4, .LBB8_8
+; CHECK-NEXT: # %bb.7:
+; CHECK-NEXT: mv a6, a4
+; CHECK-NEXT: .LBB8_8:
+; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vl8re64.v v16, (a7)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: sub a4, a6, a1
+; CHECK-NEXT: vslidedown.vx v0, v1, a3
+; CHECK-NEXT: bltu a6, a4, .LBB8_10
+; CHECK-NEXT: # %bb.9:
+; CHECK-NEXT: mv a2, a4
+; CHECK-NEXT: .LBB8_10:
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t
+; CHECK-NEXT: bltu a6, a1, .LBB8_12
+; CHECK-NEXT: # %bb.11:
+; CHECK-NEXT: mv a6, a1
+; CHECK-NEXT: .LBB8_12:
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v1
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x float> @llvm.vp.fptrunc.nxv32f64.nxv32f32(<vscale x 32 x double> %a, <vscale x 32 x i1> %m, i32 %vl)
+ ret <vscale x 32 x float> %v
+}