[llvm] ac603c8 - [RISCV] Add scalable vector truncate patterns

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 18 02:25:05 PST 2021


Author: Fraser Cormack
Date: 2021-01-18T10:18:43Z
New Revision: ac603c8d3850ed0c715c421d79bb5cb014bb21de

URL: https://github.com/llvm/llvm-project/commit/ac603c8d3850ed0c715c421d79bb5cb014bb21de
DIFF: https://github.com/llvm/llvm-project/commit/ac603c8d3850ed0c715c421d79bb5cb014bb21de.diff

LOG: [RISCV] Add scalable vector truncate patterns

Original patch by @rogfer01.

This patch adds support for vector truncates, which on RVV must be
lowered as a series of instructions, each narrowing the element type by
one power of two (SEW*2 -> SEW). This is done through custom lowering and
a custom node, RISCVISD::TRUNCATE_VECTOR, to prevent LLVM from
re-combining the split TRUNCATE nodes.
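
As an illustration (the example and its expected codegen are taken from
the vtruncs-sdnode tests added below), truncating <vscale x 1 x i64> to
<vscale x 1 x i8> halves the element width three times, giving a chain of
three narrowing right shifts by zero (vnsrl.wi):

  define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
    %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
    ret <vscale x 1 x i8> %tvec
  }

  ; expected codegen (see vtruncs-sdnode-rv32.ll / vtruncs-sdnode-rv64.ll):
  ;   vsetvli a0, zero, e32,mf2,ta,mu
  ;   vnsrl.wi v25, v16, 0
  ;   vsetvli a0, zero, e16,mf4,ta,mu
  ;   vnsrl.wi v26, v25, 0
  ;   vsetvli a0, zero, e8,mf8,ta,mu
  ;   vnsrl.wi v16, v26, 0
  ;   ret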

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Fraser Cormack <fraser at codeplay.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94796

Added: 
    llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index be7b32da8e9f..14795b5465be 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -372,6 +372,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SMAX, VT, Legal);
       setOperationAction(ISD::UMIN, VT, Legal);
       setOperationAction(ISD::UMAX, VT, Legal);
+
+      // Lower RVV truncates as a series of "RISCVISD::TRUNCATE_VECTOR"
+      // nodes which truncate by one power of two at a time.
+      setOperationAction(ISD::TRUNCATE, VT, Custom);
     }
 
     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
@@ -685,6 +689,38 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return DAG.getNode(RISCVISD::GREVI, DL, VT, Op.getOperand(0),
                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
   }
+  case ISD::TRUNCATE: {
+    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
+    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR" nodes which
+    // truncate by one power of two at a time.
+    SDLoc DL(Op);
+    EVT VT = Op.getValueType();
+    // Only custom-lower non-mask truncates
+    if (!VT.isVector() || VT.getVectorElementType() == MVT::i1)
+      return Op;
+
+    EVT DstEltVT = VT.getVectorElementType();
+
+    SDValue Src = Op.getOperand(0);
+    EVT SrcVT = Src.getValueType();
+    EVT SrcEltVT = SrcVT.getVectorElementType();
+
+    assert(DstEltVT.bitsLT(SrcEltVT) &&
+           isPowerOf2_64(DstEltVT.getSizeInBits()) &&
+           isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
+           "Unexpected vector truncate lowering");
+
+    SDValue Result = Src;
+    LLVMContext &Context = *DAG.getContext();
+    const ElementCount Count = SrcVT.getVectorElementCount();
+    do {
+      SrcEltVT = EVT::getIntegerVT(Context, SrcEltVT.getSizeInBits() / 2);
+      EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
+      Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR, DL, ResultVT, Result);
+    } while (SrcEltVT != DstEltVT);
+
+    return Result;
+  }
   case ISD::SPLAT_VECTOR:
     return lowerSPLATVECTOR(Op, DAG);
   case ISD::VSCALE: {
@@ -3670,6 +3706,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(VMV_X_S)
   NODE_NAME_CASE(SPLAT_VECTOR_I64)
   NODE_NAME_CASE(READ_VLENB)
+  NODE_NAME_CASE(TRUNCATE_VECTOR)
   }
   // clang-format on
   return nullptr;

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index b1f6e55766ff..e5bdc75a619e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -93,6 +93,8 @@ enum NodeType : unsigned {
   SPLAT_VECTOR_I64,
   // Read VLENB CSR
   READ_VLENB,
+  // Truncates a RVV integer vector by one power-of-two.
+  TRUNCATE_VECTOR,
 };
 } // namespace RISCVISD
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index cf7c8867094d..6ce6c16d4405 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -28,6 +28,10 @@ def SDTSplatI64 : SDTypeProfile<1, 1, [
 
 def rv32_splat_i64 : SDNode<"RISCVISD::SPLAT_VECTOR_I64", SDTSplatI64>;
 
+def riscv_trunc_vector : SDNode<"RISCVISD::TRUNCATE_VECTOR",
+                                SDTypeProfile<1, 1,
+                                 [SDTCisVec<0>, SDTCisVec<1>]>>;
+
 // Penalize the generic form with Complexity=1 to give the simm5/uimm5 variants
 // precedence
 def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
@@ -307,6 +311,15 @@ defm "" : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
 defm "" : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
 defm "" : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;
 
+// 12.7. Vector Narrowing Integer Right Shift Instructions
+foreach vtiTofti = AllFractionableVF2IntVectors in {
+  defvar vti = vtiTofti.Vti;
+  defvar fti = vtiTofti.Fti;
+  def : Pat<(fti.Vector (riscv_trunc_vector (vti.Vector vti.RegClass:$rs1))),
+            (!cast<Instruction>("PseudoVNSRL_WI_"#fti.LMul.MX)
+                vti.RegClass:$rs1, 0, VLMax, fti.SEW)>;
+}
+
 // 12.8. Vector Integer Comparison Instructions
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ,  "PseudoVMSEQ">;
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE,  "PseudoVMSNE">;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
new file mode 100644
index 000000000000..f2ebcd2aad20
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
@@ -0,0 +1,321 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %tvec
+}
+
+define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %tvec
+}
+
+define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v28, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %tvec
+}
+
+define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %tvec
+}
+
+define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v25, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %tvec
+}
+
+define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v25, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %tvec
+}
+
+define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v26, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %tvec
+}
+
+define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v28, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v28, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %tvec
+}
+
+define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %tvec
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll
new file mode 100644
index 000000000000..e3825e685f74
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll
@@ -0,0 +1,321 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %tvec
+}
+
+define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %tvec
+}
+
+define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v28, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %tvec
+}
+
+define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %tvec
+}
+
+define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v25, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %tvec
+}
+
+define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %tvec
+}
+
+define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %tvec
+}
+
+define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v25, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %tvec
+}
+
+define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %tvec
+}
+
+define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v16, 0
+; CHECK-NEXT:    vmv1r.v v16, v25
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %tvec
+}
+
+define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v25, v26, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v25, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %tvec
+}
+
+define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %tvec
+}
+
+define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v16, 0
+; CHECK-NEXT:    vmv2r.v v16, v26
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %tvec
+}
+
+define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v26, v28, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v26, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %tvec
+}
+
+define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vnsrl.wi v16, v28, 0
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %tvec
+}
+
+define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vnsrl.wi v28, v16, 0
+; CHECK-NEXT:    vmv4r.v v16, v28
+; CHECK-NEXT:    ret
+  %tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %tvec
+}
+
