[llvm] 8aa6b05 - [LegalizeTypes][VP] Add widen and split support for VP_TRUNCATE

Lian Wang via llvm-commits llvm-commits at lists.llvm.org
Wed May 25 19:03:52 PDT 2022


Author: Lian Wang
Date: 2022-05-26T02:03:27Z
New Revision: 8aa6b05deb2ace6042274c9a33ff40523444f459

URL: https://github.com/llvm/llvm-project/commit/8aa6b05deb2ace6042274c9a33ff40523444f459
DIFF: https://github.com/llvm/llvm-project/commit/8aa6b05deb2ace6042274c9a33ff40523444f459.diff

LOG: [LegalizeTypes][VP] Add widen and split support for VP_TRUNCATE

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D125950
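
As a rough sketch of what this enables (the IR below mirrors the new tests added in this patch; the function names split_case/widen_case and the comments are illustrative, not part of the commit): a vp.trunc whose vector type is not legal can now be split in two, with the mask and EVL operands divided between the halves, or widened to the next legal element count, using a widened mask and the original EVL.

  ; Split path: <vscale x 32 x i64> is too wide for a single register group,
  ; so the result, mask, and EVL are split between two halves.
  declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)

  define <vscale x 32 x i32> @split_case(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
    %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
    ret <vscale x 32 x i32> %v
  }

  ; Widen path: <vscale x 15 x ...> is not a legal element count, so the
  ; operation is first widened (to <vscale x 16 x ...>) before any further
  ; legalization.
  declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>, <vscale x 15 x i1>, i32)

  define <vscale x 15 x i16> @widen_case(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
    %v = call <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 %vl)
    ret <vscale x 15 x i16> %v
  }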

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 69974df8d121a..f274b50762489 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1038,6 +1038,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::FTRUNC:
   case ISD::SINT_TO_FP:
   case ISD::TRUNCATE:
+  case ISD::VP_TRUNCATE:
   case ISD::UINT_TO_FP:
   case ISD::FCANONICALIZE:
     SplitVecRes_UnaryOp(N, Lo, Hi);
@@ -2664,6 +2665,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
   case ISD::INSERT_SUBVECTOR:  Res = SplitVecOp_INSERT_SUBVECTOR(N, OpNo); break;
   case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
   case ISD::CONCAT_VECTORS:    Res = SplitVecOp_CONCAT_VECTORS(N); break;
+  case ISD::VP_TRUNCATE:
   case ISD::TRUNCATE:
     Res = SplitVecOp_TruncateHelper(N);
     break;
@@ -2907,6 +2909,14 @@ SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
     // Legalize the chain result - switch anything that used the old chain to
     // use the new one.
     ReplaceValueWith(SDValue(N, 1), Ch);
+  } else if (N->getNumOperands() == 3) {
+    assert(N->isVPOpcode() && "Expected VP opcode");
+    SDValue MaskLo, MaskHi, EVLLo, EVLHi;
+    std::tie(MaskLo, MaskHi) = SplitMask(N->getOperand(1));
+    std::tie(EVLLo, EVLHi) =
+        DAG.SplitEVL(N->getOperand(2), N->getValueType(0), dl);
+    Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo, MaskLo, EVLLo);
+    Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi, MaskHi, EVLHi);
   } else {
     Lo = DAG.getNode(N->getOpcode(), dl, OutVT, Lo);
     Hi = DAG.getNode(N->getOpcode(), dl, OutVT, Hi);
@@ -3764,6 +3774,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::FP_TO_UINT:
   case ISD::SIGN_EXTEND:
   case ISD::SINT_TO_FP:
+  case ISD::VP_TRUNCATE:
   case ISD::TRUNCATE:
   case ISD::UINT_TO_FP:
   case ISD::ZERO_EXTEND:
@@ -4234,6 +4245,12 @@ SDValue DAGTypeLegalizer::WidenVecRes_Convert(SDNode *N) {
     if (InVTEC == WidenEC) {
       if (N->getNumOperands() == 1)
         return DAG.getNode(Opcode, DL, WidenVT, InOp);
+      if (N->getNumOperands() == 3) {
+        assert(N->isVPOpcode() && "Expected VP opcode");
+        SDValue Mask =
+            GetWidenedMask(N->getOperand(1), WidenVT.getVectorElementCount());
+        return DAG.getNode(Opcode, DL, WidenVT, InOp, Mask, N->getOperand(2));
+      }
       return DAG.getNode(Opcode, DL, WidenVT, InOp, N->getOperand(1), Flags);
     }
     if (WidenVT.getSizeInBits() == InVT.getSizeInBits()) {

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
index 253e9700859e8..8d7eb7406b5fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16>, <2 x i1>, i32)
 
@@ -198,6 +198,20 @@ define <2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl)
   ret <2 x i16> %v
 }
 
+declare <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64>, <15 x i1>, i32)
+
+define <15 x i16> @vtrunc_nxv15i16_nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v16, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %v = call <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
+  ret <15 x i16> %v
+}
+
 declare <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64>, <2 x i1>, i32)
 
 define <2 x i32> @vtrunc_nxv2i32_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
@@ -219,3 +233,309 @@ define <2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl)
   %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
   ret <2 x i32> %v
 }
+
+declare <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64>, <128 x i1>, i32)
+
+define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv128i32_nxv128i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    li a3, 56
+; CHECK-NEXT:    mul a2, a2, a3
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    li a3, 24
+; CHECK-NEXT:    mul a2, a2, a3
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 5
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; CHECK-NEXT:    addi a3, a7, -64
+; CHECK-NEXT:    vslidedown.vi v2, v0, 8
+; CHECK-NEXT:    mv a4, a2
+; CHECK-NEXT:    bltu a7, a3, .LBB16_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a4, a3
+; CHECK-NEXT:  .LBB16_2:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vi v3, v2, 4
+; CHECK-NEXT:    addi a6, a4, -32
+; CHECK-NEXT:    addi a3, a1, 640
+; CHECK-NEXT:    mv a5, a2
+; CHECK-NEXT:    bltu a4, a6, .LBB16_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a5, a6
+; CHECK-NEXT:  .LBB16_4:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v0, v3, 2
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v16, (a3)
+; CHECK-NEXT:    addi t0, a5, -16
+; CHECK-NEXT:    addi a6, a1, 512
+; CHECK-NEXT:    mv a3, a2
+; CHECK-NEXT:    bltu a5, t0, .LBB16_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    mv a3, t0
+; CHECK-NEXT:  .LBB16_6:
+; CHECK-NEXT:    vle64.v v8, (a6)
+; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
+; CHECK-NEXT:    li a3, 16
+; CHECK-NEXT:    vncvt.x.x.w v24, v16, v0.t
+; CHECK-NEXT:    csrr a6, vlenb
+; CHECK-NEXT:    slli a6, a6, 4
+; CHECK-NEXT:    add a6, sp, a6
+; CHECK-NEXT:    addi a6, a6, 16
+; CHECK-NEXT:    vs8r.v v24, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a5, a3, .LBB16_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    li a5, 16
+; CHECK-NEXT:  .LBB16_8:
+; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT:    li a5, 64
+; CHECK-NEXT:    vmv1r.v v0, v3
+; CHECK-NEXT:    vncvt.x.x.w v16, v8, v0.t
+; CHECK-NEXT:    csrr a6, vlenb
+; CHECK-NEXT:    li t0, 48
+; CHECK-NEXT:    mul a6, a6, t0
+; CHECK-NEXT:    add a6, sp, a6
+; CHECK-NEXT:    addi a6, a6, 16
+; CHECK-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a7, a5, .LBB16_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    li a7, 64
+; CHECK-NEXT:  .LBB16_10:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vi v3, v1, 4
+; CHECK-NEXT:    addi t0, a7, -32
+; CHECK-NEXT:    addi a5, a1, 128
+; CHECK-NEXT:    mv a6, a2
+; CHECK-NEXT:    bltu a7, t0, .LBB16_12
+; CHECK-NEXT:  # %bb.11:
+; CHECK-NEXT:    mv a6, t0
+; CHECK-NEXT:  .LBB16_12:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v0, v3, 2
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v16, (a5)
+; CHECK-NEXT:    addi a5, a6, -16
+; CHECK-NEXT:    mv t0, a2
+; CHECK-NEXT:    bltu a6, a5, .LBB16_14
+; CHECK-NEXT:  # %bb.13:
+; CHECK-NEXT:    mv t0, a5
+; CHECK-NEXT:  .LBB16_14:
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    li a5, 32
+; CHECK-NEXT:    vsetvli zero, t0, e32, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v24, v16, v0.t
+; CHECK-NEXT:    csrr t0, vlenb
+; CHECK-NEXT:    slli t0, t0, 3
+; CHECK-NEXT:    add t0, sp, t0
+; CHECK-NEXT:    addi t0, t0, 16
+; CHECK-NEXT:    vs8r.v v24, (t0) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a6, a3, .LBB16_16
+; CHECK-NEXT:  # %bb.15:
+; CHECK-NEXT:    li a6, 16
+; CHECK-NEXT:  .LBB16_16:
+; CHECK-NEXT:    addi t0, a1, 384
+; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v3
+; CHECK-NEXT:    vncvt.x.x.w v16, v8, v0.t
+; CHECK-NEXT:    csrr a6, vlenb
+; CHECK-NEXT:    li t1, 40
+; CHECK-NEXT:    mul a6, a6, t1
+; CHECK-NEXT:    add a6, sp, a6
+; CHECK-NEXT:    addi a6, a6, 16
+; CHECK-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a4, a5, .LBB16_18
+; CHECK-NEXT:  # %bb.17:
+; CHECK-NEXT:    li a4, 32
+; CHECK-NEXT:  .LBB16_18:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vi v0, v2, 2
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT:    vle64.v v24, (t0)
+; CHECK-NEXT:    addi t0, a4, -16
+; CHECK-NEXT:    addi a6, a1, 256
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    bltu a4, t0, .LBB16_20
+; CHECK-NEXT:  # %bb.19:
+; CHECK-NEXT:    mv a1, t0
+; CHECK-NEXT:  .LBB16_20:
+; CHECK-NEXT:    vle64.v v8, (a6)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v16, v24, v0.t
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a4, a3, .LBB16_22
+; CHECK-NEXT:  # %bb.21:
+; CHECK-NEXT:    li a4, 16
+; CHECK-NEXT:  .LBB16_22:
+; CHECK-NEXT:    vsetvli zero, a4, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v2
+; CHECK-NEXT:    vncvt.x.x.w v24, v8, v0.t
+; CHECK-NEXT:    bltu a7, a5, .LBB16_24
+; CHECK-NEXT:  # %bb.23:
+; CHECK-NEXT:    li a7, 32
+; CHECK-NEXT:  .LBB16_24:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a1, a7, -16
+; CHECK-NEXT:    vslidedown.vi v0, v1, 2
+; CHECK-NEXT:    bltu a7, a1, .LBB16_26
+; CHECK-NEXT:  # %bb.25:
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:  .LBB16_26:
+; CHECK-NEXT:    vsetvli zero, a5, e32, m8, tu, mu
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a4, 48
+; CHECK-NEXT:    mul a1, a1, a4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v8, v16, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a4, 48
+; CHECK-NEXT:    mul a1, a1, a4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a4, 40
+; CHECK-NEXT:    mul a1, a1, a4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v8, v16, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a4, 40
+; CHECK-NEXT:    mul a1, a1, a4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v24, v8, 16
+; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a2, 24
+; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v16, v8, v0.t
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    bltu a7, a3, .LBB16_28
+; CHECK-NEXT:  # %bb.27:
+; CHECK-NEXT:    li a7, 16
+; CHECK-NEXT:  .LBB16_28:
+; CHECK-NEXT:    vsetvli zero, a7, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v1
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v16, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, a5, e32, m8, tu, mu
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    vse32.v v16, (a0)
+; CHECK-NEXT:    addi a1, a0, 256
+; CHECK-NEXT:    vse32.v v24, (a1)
+; CHECK-NEXT:    addi a1, a0, 128
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    li a3, 40
+; CHECK-NEXT:    mul a2, a2, a3
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vl8re8.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v8, (a1)
+; CHECK-NEXT:    addi a0, a0, 384
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a2, 48
+; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8re8.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 56
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %v = call <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64> %a, <128 x i1> %m, i32 %vl)
+  ret <128 x i32> %v
+}
+
+declare <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64>, <32 x i1>, i32)
+
+define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv32i32_nxv32i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a1, 0
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; CHECK-NEXT:    addi a2, a0, -16
+; CHECK-NEXT:    vslidedown.vi v0, v0, 2
+; CHECK-NEXT:    bltu a0, a2, .LBB17_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:  .LBB17_2:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT:    li a1, 16
+; CHECK-NEXT:    vncvt.x.x.w v8, v16, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB17_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB17_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v16, v24, v0.t
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %v = call <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64> %a, <32 x i1> %m, i32 %vl)
+  ret <32 x i32> %v
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index fc5ed87865e41..28db08a23597b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -152,6 +152,41 @@ define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<vscale x 2 x i64> %a
   ret <vscale x 2 x i16> %v
 }
 
+declare <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64>, <vscale x 15 x i1>, i32)
+
+define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 3
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB12_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB12_2:
+; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v28, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v18, v28, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB12_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB12_4:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vncvt.x.x.w v20, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 %vl)
+  ret <vscale x 15 x i16> %v
+}
+
 declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
@@ -189,18 +224,18 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
 ; CHECK-NEXT:    slli a1, a1, 1
 ; CHECK-NEXT:    sub a3, a0, a1
 ; CHECK-NEXT:    vslidedown.vx v0, v0, a4
-; CHECK-NEXT:    bltu a0, a3, .LBB14_2
+; CHECK-NEXT:    bltu a0, a3, .LBB15_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a2, a3
-; CHECK-NEXT:  .LBB14_2:
+; CHECK-NEXT:  .LBB15_2:
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v28, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vncvt.x.x.w v18, v28, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB14_4
+; CHECK-NEXT:    bltu a0, a1, .LBB15_4
 ; CHECK-NEXT:  # %bb.3:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB14_4:
+; CHECK-NEXT:  .LBB15_4:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vncvt.x.x.w v20, v8, v0.t
@@ -211,3 +246,138 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
   %v = call <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
   ret <vscale x 32 x i7> %v
 }
+
+declare <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv32i8_nxv32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    srli a4, a1, 2
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    sub a3, a0, a1
+; CHECK-NEXT:    vslidedown.vx v0, v0, a4
+; CHECK-NEXT:    bltu a0, a3, .LBB16_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a2, a3
+; CHECK-NEXT:  .LBB16_2:
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v28, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v18, v28, v0.t
+; CHECK-NEXT:    bltu a0, a1, .LBB16_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a0, a1
+; CHECK-NEXT:  .LBB16_4:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    vncvt.x.x.w v20, v8, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    sub sp, sp, a1
+; CHECK-NEXT:    vmv1r.v v24, v0
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a4, a1, 1
+; CHECK-NEXT:    srli a3, a1, 3
+; CHECK-NEXT:    mv a5, a2
+; CHECK-NEXT:    bltu a2, a4, .LBB17_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a5, a4
+; CHECK-NEXT:  .LBB17_2:
+; CHECK-NEXT:    li a6, 0
+; CHECK-NEXT:    vsetvli a7, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    sub a7, a5, a1
+; CHECK-NEXT:    vslidedown.vx v0, v24, a3
+; CHECK-NEXT:    bltu a5, a7, .LBB17_4
+; CHECK-NEXT:  # %bb.3:
+; CHECK-NEXT:    mv a6, a7
+; CHECK-NEXT:  .LBB17_4:
+; CHECK-NEXT:    srli a7, a1, 2
+; CHECK-NEXT:    slli t0, a1, 3
+; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT:    vncvt.x.x.w v12, v16, v0.t
+; CHECK-NEXT:    bltu a5, a1, .LBB17_6
+; CHECK-NEXT:  # %bb.5:
+; CHECK-NEXT:    mv a5, a1
+; CHECK-NEXT:  .LBB17_6:
+; CHECK-NEXT:    li a6, 0
+; CHECK-NEXT:    vsetvli t1, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vslidedown.vx v1, v24, a7
+; CHECK-NEXT:    add a7, a0, t0
+; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, mu
+; CHECK-NEXT:    sub a4, a2, a4
+; CHECK-NEXT:    vmv1r.v v0, v24
+; CHECK-NEXT:    csrr a5, vlenb
+; CHECK-NEXT:    slli a5, a5, 3
+; CHECK-NEXT:    add a5, sp, a5
+; CHECK-NEXT:    addi a5, a5, 16
+; CHECK-NEXT:    vl8re8.v v16, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v8, v16, v0.t
+; CHECK-NEXT:    bltu a2, a4, .LBB17_8
+; CHECK-NEXT:  # %bb.7:
+; CHECK-NEXT:    mv a6, a4
+; CHECK-NEXT:  .LBB17_8:
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vl8re64.v v16, (a7)
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 3
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a2, 0
+; CHECK-NEXT:    sub a4, a6, a1
+; CHECK-NEXT:    vslidedown.vx v0, v1, a3
+; CHECK-NEXT:    bltu a6, a4, .LBB17_10
+; CHECK-NEXT:  # %bb.9:
+; CHECK-NEXT:    mv a2, a4
+; CHECK-NEXT:  .LBB17_10:
+; CHECK-NEXT:    vl8re64.v v16, (a0)
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 3
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v20, v24, v0.t
+; CHECK-NEXT:    bltu a6, a1, .LBB17_12
+; CHECK-NEXT:  # %bb.11:
+; CHECK-NEXT:    mv a6, a1
+; CHECK-NEXT:  .LBB17_12:
+; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v0, v1
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vncvt.x.x.w v16, v24, v0.t
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
+  ret <vscale x 32 x i32> %v
+}
