[llvm] 7a6568d - [RISCV] Support LLVM IR intrinsics for XAndesVSIntLoad (#147493)

via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 8 22:03:01 PDT 2025


Author: Jim Lin
Date: 2025-07-09T13:02:57+08:00
New Revision: 7a6568dcd5d471d5f836e5a7c810aff658e1d605

URL: https://github.com/llvm/llvm-project/commit/7a6568dcd5d471d5f836e5a7c810aff658e1d605
DIFF: https://github.com/llvm/llvm-project/commit/7a6568dcd5d471d5f836e5a7c810aff658e1d605.diff

LOG: [RISCV] Support LLVM IR intrinsics for XAndesVSIntLoad (#147493)

This patch adds LLVM IR intrinsic support for XAndesVSIntLoad.

The documentation for the intrinsics can be found at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/intrinsic_funcs/04_andes_vector_int4_load_extension.adoc

The Clang part will be added in a later patch.
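
For illustration, the new intrinsics follow the usual unit-stride load shape
exercised by the tests below; %ptr, %vl, %mask and %passthru are placeholder
names, and iXLen stands for i32 or i64 depending on XLEN:

  ; Unmasked signed form (nds_vln); the first operand is the passthru.
  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.nxv1i8(
    <vscale x 1 x i8> poison,
    <vscale x 1 x i8>* %ptr,
    iXLen %vl)

  ; Masked unsigned form (nds_vlnu); the trailing iXLen 1 is the policy
  ; operand.
  %b = call <vscale x 1 x i8> @llvm.riscv.nds.vlnu.mask.nxv1i8(
    <vscale x 1 x i8> %passthru,
    <vscale x 1 x i8>* %ptr,
    <vscale x 1 x i1> %mask,
    iXLen %vl, iXLen 1)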

---------

Co-authored-by: Lino Hsing-Yu Peng <linopeng at andestech.com>

Added: 
    llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
    llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td b/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
index 92a07e9a6a5d0..43f7f7e22fe94 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXAndes.td
@@ -15,6 +15,10 @@ let TargetPrefix = "riscv" in {
   def int_riscv_nds_vfwcvt_s_bf16 : RISCVConversionUnMasked;
   def int_riscv_nds_vfncvt_bf16_s : RISCVConversionUnMaskedRoundingMode;
 
+  // Andes Vector INT4 Load Extension
+  defm nds_vln  : RISCVUSLoad;
+  defm nds_vlnu : RISCVUSLoad;
+
   // Andes Vector Packed FP16 Extension
   defm nds_vfpmadt : RISCVBinaryAAXRoundingMode;
   defm nds_vfpmadb : RISCVBinaryAAXRoundingMode;

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
index d407beffcd7d1..aeda5ac109f1a 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -48,6 +48,7 @@ namespace RISCV {
 #define GET_RISCVVSETable_IMPL
 #define GET_RISCVVLXTable_IMPL
 #define GET_RISCVVSXTable_IMPL
+#define GET_RISCVNDSVLNTable_IMPL
 #include "RISCVGenSearchableTables.inc"
 } // namespace RISCV
 

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index e123ef45fa503..f41ad419db1a7 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -749,6 +749,14 @@ struct VLX_VSXPseudo {
   uint16_t Pseudo;
 };
 
+struct NDSVLNPseudo {
+  uint16_t Masked : 1;
+  uint16_t Unsigned : 1;
+  uint16_t Log2SEW : 3;
+  uint16_t LMUL : 3;
+  uint16_t Pseudo;
+};
+
 #define GET_RISCVVSSEGTable_DECL
 #define GET_RISCVVLSEGTable_DECL
 #define GET_RISCVVLXSEGTable_DECL
@@ -757,6 +765,7 @@ struct VLX_VSXPseudo {
 #define GET_RISCVVSETable_DECL
 #define GET_RISCVVLXTable_DECL
 #define GET_RISCVVSXTable_DECL
+#define GET_RISCVNDSVLNTable_DECL
 #include "RISCVGenSearchableTables.inc"
 } // namespace RISCV
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7f61c782fa603..ae99b620cf436 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -2299,6 +2299,37 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       ReplaceNode(Node, Load);
       return;
     }
+    case Intrinsic::riscv_nds_vln:
+    case Intrinsic::riscv_nds_vln_mask:
+    case Intrinsic::riscv_nds_vlnu:
+    case Intrinsic::riscv_nds_vlnu_mask: {
+      bool IsMasked = IntNo == Intrinsic::riscv_nds_vln_mask ||
+                      IntNo == Intrinsic::riscv_nds_vlnu_mask;
+      bool IsUnsigned = IntNo == Intrinsic::riscv_nds_vlnu ||
+                        IntNo == Intrinsic::riscv_nds_vlnu_mask;
+
+      MVT VT = Node->getSimpleValueType(0);
+      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+      unsigned CurOp = 2;
+      SmallVector<SDValue, 8> Operands;
+
+      Operands.push_back(Node->getOperand(CurOp++));
+      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
+                                 /*IsStridedOrIndexed=*/false, Operands,
+                                 /*IsLoad=*/true);
+
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      const RISCV::NDSVLNPseudo *P = RISCV::getNDSVLNPseudo(
+          IsMasked, IsUnsigned, Log2SEW, static_cast<unsigned>(LMUL));
+      MachineSDNode *Load =
+          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
+
+      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
+
+      ReplaceNode(Node, Load);
+      return;
+    }
     }
     break;
   }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
index e46b0d409eb5d..ec38201cd28c6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXAndes.td
@@ -64,6 +64,26 @@ def Log2 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(Imm, SDLoc(N), N->getValueType(0));
 }]>;
 
+//===----------------------------------------------------------------------===//
+// Pseudo table
+//===----------------------------------------------------------------------===//
+
+class RISCVNDSVLN<bit M, bit U, bits<3> S, bits<3> L> {
+  bits<1> Masked = M;
+  bits<1> Unsigned = U;
+  bits<3> Log2SEW = S;
+  bits<3> LMUL = L;
+  Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVNDSVLNTable : GenericTable {
+  let FilterClass = "RISCVNDSVLN";
+  let CppTypeName = "NDSVLNPseudo";
+  let Fields = ["Masked", "Unsigned", "Log2SEW", "LMUL", "Pseudo"];
+  let PrimaryKey = ["Masked", "Unsigned", "Log2SEW", "LMUL"];
+  let PrimaryKeyName = "getNDSVLNPseudo";
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction Class Templates
 //===----------------------------------------------------------------------===//
@@ -416,6 +436,39 @@ class NDSRVInstVLN<bits<5> funct5, string opcodestr>
   let RVVConstraint = VMConstraint;
 }
 
+class VPseudoVLN8NoMask<VReg RetClass, bit U> :
+      Pseudo<(outs RetClass:$rd),
+             (ins RetClass:$dest,
+                  GPRMemZeroOffset:$rs1,
+                  AVL:$vl, sew:$sew, vec_policy:$policy), []>,
+      RISCVVPseudo,
+      RISCVNDSVLN</*Masked*/0, /*Unsigned*/U, !logtwo(8), VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasVecPolicyOp = 1;
+  let Constraints = "$rd = $dest";
+}
+
+class VPseudoVLN8Mask<VReg RetClass, bit U> :
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$passthru,
+                  GPRMemZeroOffset:$rs1,
+                  VMaskOp:$vm, AVL:$vl, sew:$sew, vec_policy:$policy), []>,
+      RISCVVPseudo,
+      RISCVNDSVLN</*Masked*/1, /*Unsigned*/U, !logtwo(8), VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let Constraints = "$rd = $passthru";
+}
+
 //===----------------------------------------------------------------------===//
 // Multiclass
 //===----------------------------------------------------------------------===//
@@ -465,6 +518,22 @@ multiclass VPatConversionBF16_S<string intrinsic, string instruction> {
   }
 }
 
+multiclass VPseudoVLN8<bit U> {
+  foreach lmul = MxSet<8>.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo :
+        VPseudoVLN8NoMask<vreg, U>,
+        VLESched<LInfo>;
+      def "_V_" # LInfo # "_MASK" :
+        VPseudoVLN8Mask<vreg, U>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        VLESched<LInfo>;
+    }
+  }
+}
+
 let fprclass = !cast<RegisterClass>("FPR32") in
 def SCALAR_F16_FPR32 : FPR_Info<16>;
 
@@ -684,6 +753,11 @@ defm : VPatConversionS_BF16<"int_riscv_nds_vfwcvt_s_bf16",
 defm : VPatConversionBF16_S<"int_riscv_nds_vfncvt_bf16_s",
                             "PseudoNDS_VFNCVT_BF16">;
 
+let Predicates = [HasVendorXAndesVSIntLoad] in {
+defm PseudoNDS_VLN8  : VPseudoVLN8<0>;
+defm PseudoNDS_VLNU8 : VPseudoVLN8<1>;
+} // Predicates = [HasVendorXAndesVSIntLoad]
+
 let Predicates = [HasVendorXAndesVPackFPH],
     mayRaiseFPException = true in {
 defm PseudoNDS_VFPMADT : VPseudoVFPMAD_VF_RM;

diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
new file mode 100644
index 0000000000000..e90e4e560075a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64x,+xandesvsintload \
+; RUN:   -verify-machineinstrs -target-abi=ilp32 | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvsintload \
+; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_nds_vln_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_nds_vln_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.nds.vln.nxv2i8(
+    <vscale x 2 x i8> poison,
+    <vscale x 2 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.nds.vln.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_nds_vln_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.nds.vln.nxv4i8(
+    <vscale x 4 x i8> poison,
+    <vscale x 4 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.nds.vln.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_nds_vln_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.nds.vln.nxv8i8(
+    <vscale x 8 x i8> poison,
+    <vscale x 8 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.nds.vln.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_nds_vln_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.nds.vln.nxv16i8(
+    <vscale x 16 x i8> poison,
+    <vscale x 16 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.nds.vln.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_nds_vln_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.nds.vln.nxv32i8(
+    <vscale x 32 x i8> poison,
+    <vscale x 32 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.nds.vln.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_nds_vln_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    nds.vln8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.nds.vln.nxv64i8(
+    <vscale x 64 x i8> poison,
+    <vscale x 64 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    nds.vln8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.nds.vln.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll
new file mode 100644
index 0000000000000..363d57b56b031
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64x,+xandesvsintload \
+; RUN:   -verify-machineinstrs -target-abi=ilp32 | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvsintload \
+; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_nds_vlnu_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vlnu.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vlnu.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_nds_vlnu_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.nds.vlnu.nxv2i8(
+    <vscale x 2 x i8> poison,
+    <vscale x 2 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.nds.vlnu.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_nds_vlnu_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.nds.vlnu.nxv4i8(
+    <vscale x 4 x i8> poison,
+    <vscale x 4 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.nds.vlnu.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_nds_vlnu_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.nds.vlnu.nxv8i8(
+    <vscale x 8 x i8> poison,
+    <vscale x 8 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.nds.vlnu.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_nds_vlnu_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.nds.vlnu.nxv16i8(
+    <vscale x 16 x i8> poison,
+    <vscale x 16 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.nds.vlnu.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_nds_vlnu_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.nds.vlnu.nxv32i8(
+    <vscale x 32 x i8> poison,
+    <vscale x 32 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.nds.vlnu.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_nds_vlnu_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.nds.vlnu.nxv64i8(
+    <vscale x 64 x i8> poison,
+    <vscale x 64 x i8>* %0,
+    iXLen %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    nds.vlnu8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.nds.vlnu.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}


        

