[clang] [RISCV] Implement intrinsics for XAndesVPackFPH (PR #140007)

Jim Lin via cfe-commits cfe-commits at lists.llvm.org
Wed May 14 23:00:24 PDT 2025


https://github.com/tclin914 created https://github.com/llvm/llvm-project/pull/140007

This patch implements Clang intrinsic support for the Andes Vector Packed FP16 extension (XAndesVPackFPH).

The documentation for the intrinsics can be found at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph
and, with the policy variants, at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/policy_funcs/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph
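
For readers skimming the patch, here is a minimal usage sketch distilled from the non-overloaded tests below. The intrinsic names, argument orders, and __RISCV_FRM_RNE are taken directly from the added test files; the -march spelling is an assumption about how a user would enable the extension, and the packed-FP16 semantics of the t/b variants are specified in the linked documentation rather than restated here.

  #include <andes_vector.h>

  /* Sketch only: assumes a compiler carrying this patch and something like
     -march=rv64gcv_zvfhmin_xandesvpackfph (flag spelling is an assumption). */
  vfloat16m1_t example(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
    /* Plain form: rounding mode is dynamic (frm = 7 in the lowered call). */
    vfloat16m1_t r = __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
    /* _rm form: explicit static rounding mode. */
    r = __riscv_nds_vfpmadt_vf_f16m1_rm(r, op2, __RISCV_FRM_RNE, vl);
    /* _m form: masked; the non-policy tests lower this with policy operand 3
       (tail agnostic, mask agnostic). */
    return __riscv_nds_vfpmadb_vf_f16m1_m(mask, r, op2, vl);
  }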

From f49b0b7d6719749ceea561d952af2a29f0f175a0 Mon Sep 17 00:00:00 2001
From: Jim Lin <jim at andestech.com>
Date: Tue, 13 May 2025 16:22:36 +0800
Subject: [PATCH] [RISCV] Implement intrinsics for XAndesVPackFPH

This patch implements Clang intrinsic support for the Andes Vector Packed FP16 extension (XAndesVPackFPH).

The documentation for the intrinsics can be found at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph
and, with the policy variants, at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/policy_funcs/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph

Co-authored-by: Tony Chuan-Yue Yuan <yuan593 at andestech.com>
---
 clang/include/clang/Basic/CMakeLists.txt      |   9 +
 clang/include/clang/Basic/TargetBuiltins.h    |   3 +
 .../include/clang/Basic/riscv_andes_vector.td |  83 ++++
 .../clang/Sema/RISCVIntrinsicManager.h        |   2 +-
 clang/include/clang/Sema/SemaRISCV.h          |   3 +
 .../clang/Support/RISCVVIntrinsicUtils.h      |   1 +
 clang/lib/Basic/Targets/RISCV.cpp             |  22 +-
 clang/lib/CodeGen/TargetBuiltins/RISCV.cpp    |   3 +
 clang/lib/Headers/CMakeLists.txt              |   1 +
 clang/lib/Headers/andes_vector.h              |  16 +
 clang/lib/Parse/ParsePragma.cpp               |   8 +-
 clang/lib/Sema/SemaLookup.cpp                 |   3 +-
 clang/lib/Sema/SemaRISCV.cpp                  |  23 +
 clang/lib/Support/RISCVVIntrinsicUtils.cpp    |   1 +
 .../non-policy/non-overloaded/nds_vfpmadb.c   | 225 +++++++++
 .../non-policy/non-overloaded/nds_vfpmadt.c   | 225 +++++++++
 .../non-policy/overloaded/nds_vfpmadb.c       | 225 +++++++++
 .../non-policy/overloaded/nds_vfpmadt.c       | 225 +++++++++
 .../policy/non-overloaded/nds_vfpmadb.c       | 441 ++++++++++++++++++
 .../policy/non-overloaded/nds_vfpmadt.c       | 441 ++++++++++++++++++
 .../policy/overloaded/nds_vfpmadb.c           | 441 ++++++++++++++++++
 .../policy/overloaded/nds_vfpmadt.c           | 441 ++++++++++++++++++
 clang/test/Sema/riscv-bad-intrinsic-pragma.c  |   2 +-
 clang/utils/TableGen/RISCVVEmitter.cpp        |   1 +
 clang/utils/TableGen/TableGen.cpp             |  21 +
 25 files changed, 2858 insertions(+), 8 deletions(-)
 create mode 100644 clang/include/clang/Basic/riscv_andes_vector.td
 create mode 100644 clang/lib/Headers/andes_vector.h
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadb.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadb.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadb.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadt.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadb.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadt.c

diff --git a/clang/include/clang/Basic/CMakeLists.txt b/clang/include/clang/Basic/CMakeLists.txt
index 265ea1fc06494..477f33b880c22 100644
--- a/clang/include/clang/Basic/CMakeLists.txt
+++ b/clang/include/clang/Basic/CMakeLists.txt
@@ -201,3 +201,12 @@ clang_tablegen(riscv_sifive_vector_builtin_cg.inc -gen-riscv-sifive-vector-built
 clang_tablegen(riscv_sifive_vector_builtin_sema.inc -gen-riscv-sifive-vector-builtin-sema
   SOURCE riscv_sifive_vector.td
   TARGET ClangRISCVSiFiveVectorBuiltinSema)
+clang_tablegen(riscv_andes_vector_builtins.inc -gen-riscv-andes-vector-builtins
+  SOURCE riscv_andes_vector.td
+  TARGET ClangRISCVAndesVectorBuiltins)
+clang_tablegen(riscv_andes_vector_builtin_cg.inc -gen-riscv-andes-vector-builtin-codegen
+  SOURCE riscv_andes_vector.td
+  TARGET ClangRISCVAndesVectorBuiltinCG)
+clang_tablegen(riscv_andes_vector_builtin_sema.inc -gen-riscv-andes-vector-builtin-sema
+  SOURCE riscv_andes_vector.td
+  TARGET ClangRISCVAndesVectorBuiltinSema)
diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index 4e490d87ee8d6..fb09b20975346 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -197,6 +197,9 @@ namespace clang {
     FirstSiFiveBuiltin,
     LastRVVBuiltin = FirstSiFiveBuiltin - 1,
 #include "clang/Basic/riscv_sifive_vector_builtins.inc"
+    FirstAndesBuiltin,
+    LastSiFiveBuiltin = FirstAndesBuiltin - 1,
+#include "clang/Basic/riscv_andes_vector_builtins.inc"
 #undef GET_RISCVV_BUILTIN_ENUMERATORS
     FirstTSBuiltin,
   };
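
The enum edit above follows the file's existing sentinel convention: each vendor block opens with a First*Builtin marker, the previous block's Last*Builtin is computed from it, and because the marker is immediately followed by the included enumerators, the first real builtin of the block aliases the First* marker. A self-contained C sketch of the trick (names here are invented for illustration, not taken from clang):

  #include <stdio.h>

  /* Sentinel layout mirroring the RISCVVector builtin enum: block sizes fall
     out as differences of the First* markers. */
  enum Builtins {
    A1, A2, A3,          /* base block: 0, 1, 2                       */
    FirstB,              /* == 3                                      */
    LastA = FirstB - 1,  /* == A3 == 2                                */
    B1, B2,              /* counter resumes at LastA + 1, so B1 == FirstB */
    FirstC,              /* == 5                                      */
    LastB = FirstC - 1,  /* == B2 == 4                                */
    C1,
    End,
    LastC = End - 1,
  };

  int main(void) {
    printf("NumA=%d NumB=%d NumC=%d\n",
           FirstB - A1, FirstC - FirstB, End - FirstC);  /* prints 3 2 1 */
    return 0;
  }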
diff --git a/clang/include/clang/Basic/riscv_andes_vector.td b/clang/include/clang/Basic/riscv_andes_vector.td
new file mode 100644
index 0000000000000..8c202a8b6ff41
--- /dev/null
+++ b/clang/include/clang/Basic/riscv_andes_vector.td
@@ -0,0 +1,83 @@
+//==--- riscv_andes_vector.td - RISC-V Andes Builtin function list --------===//
+//
+//  Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+//  See https://llvm.org/LICENSE.txt for license information.
+//  SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for RISC-V Andes Vector Extension. See:
+//
+//     https://github.com/andestech/andes-vector-intrinsic-doc
+//
+//===----------------------------------------------------------------------===//
+
+include "riscv_vector_common.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+
+// Andes Vector Packed FP16 Extension (XAndesVPackFPH)
+
+multiclass RVVFPMAD {
+  let Log2LMUL = [-2, -1, 0, 1, 2, 3],
+      OverloadedName = NAME in {
+    defm NAME : RVVOutOp1BuiltinSet<NAME, "x", [["vf", "v", "vvf"]]>;
+
+    let HasFRMRoundModeOp = true in
+      defm NAME : RVVOutOp1BuiltinSet<NAME, "x", [["vf", "v", "vvfu"]]>;
+  }
+}
+
+let RequiredFeatures = ["Xandesvpackfph"],
+    UnMaskedPolicyScheme = HasPassthruOperand in {
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+                      Operands.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+    defm nds_vfpmadt : RVVFPMAD;
+    defm nds_vfpmadb : RVVFPMAD;
+  }
+}
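
To make the ManualCodegen operand shuffling above concrete, consider the unmasked, no-frm path exercised by the new tests: the builtin carries no maskedoff and no rounding-mode argument, so the codegen inserts a poison passthru and a constant frm of 7 (dynamic rounding mode). The C call and the IR the tests expect for it (copied in spirit from the nds_vfpmadb CHECK lines; masked calls additionally receive the mask after the scalar operand and a trailing policy constant):

  #include <andes_vector.h>

  vfloat16m1_t f(vfloat16m1_t op1, float op2, size_t vl) {
    /* Lowers to:
         call <vscale x 4 x half>
           @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(
             <vscale x 4 x half> poison,  ; passthru (no maskedoff)
             <vscale x 4 x half> %op1,    ; op0
             float %op2,                  ; op1 (scalar)
             i64 7,                       ; frm = dynamic
             i64 %vl)                     ; vl                       */
    return __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
  }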
diff --git a/clang/include/clang/Sema/RISCVIntrinsicManager.h b/clang/include/clang/Sema/RISCVIntrinsicManager.h
index 2a3dd1e7c4697..05d995b77c99a 100644
--- a/clang/include/clang/Sema/RISCVIntrinsicManager.h
+++ b/clang/include/clang/Sema/RISCVIntrinsicManager.h
@@ -24,7 +24,7 @@ class Preprocessor;
 namespace sema {
 class RISCVIntrinsicManager {
 public:
-  enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR };
+  enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR, ANDES_VECTOR };
 
   virtual ~RISCVIntrinsicManager() = default;
 
diff --git a/clang/include/clang/Sema/SemaRISCV.h b/clang/include/clang/Sema/SemaRISCV.h
index d7f17797283b8..8d2e1c6b7512f 100644
--- a/clang/include/clang/Sema/SemaRISCV.h
+++ b/clang/include/clang/Sema/SemaRISCV.h
@@ -51,6 +51,9 @@ class SemaRISCV : public SemaBase {
   /// Indicate RISC-V SiFive vector builtin functions enabled or not.
   bool DeclareSiFiveVectorBuiltins = false;
 
+  /// Indicate RISC-V Andes vector builtin functions enabled or not.
+  bool DeclareAndesVectorBuiltins = false;
+
   std::unique_ptr<sema::RISCVIntrinsicManager> IntrinsicManager;
 };
 
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 00a79a0fcb5d9..bbcf16dc36e71 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -489,6 +489,7 @@ class RVVIntrinsic {
 enum RVVRequire {
   RVV_REQ_RV64,
   RVV_REQ_Zvfhmin,
+  RVV_REQ_Xandesvpackfph,
   RVV_REQ_Xsfvcp,
   RVV_REQ_Xsfvfnrclipxfqf,
   RVV_REQ_Xsfvfwmaccqqq,
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 390ef0f3ac884..69345749e3cbf 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -246,13 +246,15 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
 static constexpr int NumRVVBuiltins =
     RISCVVector::FirstSiFiveBuiltin - Builtin::FirstTSBuiltin;
 static constexpr int NumRVVSiFiveBuiltins =
-    RISCVVector::FirstTSBuiltin - RISCVVector::FirstSiFiveBuiltin;
+    RISCVVector::FirstAndesBuiltin - RISCVVector::FirstSiFiveBuiltin;
+static constexpr int NumRVVAndesBuiltins =
+    RISCVVector::FirstTSBuiltin - RISCVVector::FirstAndesBuiltin;
 static constexpr int NumRISCVBuiltins =
     RISCV::LastTSBuiltin - RISCVVector::FirstTSBuiltin;
 static constexpr int NumBuiltins =
     RISCV::LastTSBuiltin - Builtin::FirstTSBuiltin;
-static_assert(NumBuiltins ==
-              (NumRVVBuiltins + NumRVVSiFiveBuiltins + NumRISCVBuiltins));
+static_assert(NumBuiltins == (NumRVVBuiltins + NumRVVSiFiveBuiltins +
+                              NumRVVAndesBuiltins + NumRISCVBuiltins));
 
 namespace RVV {
 #define GET_RISCVV_BUILTIN_STR_TABLE
@@ -280,6 +282,19 @@ static constexpr std::array<Builtin::Info, NumRVVSiFiveBuiltins> BuiltinInfos =
 };
 } // namespace RVVSiFive
 
+namespace RVVAndes {
+#define GET_RISCVV_BUILTIN_STR_TABLE
+#include "clang/Basic/riscv_andes_vector_builtins.inc"
+#undef GET_RISCVV_BUILTIN_STR_TABLE
+
+static constexpr std::array<Builtin::Info, NumRVVAndesBuiltins> BuiltinInfos =
+    {
+#define GET_RISCVV_BUILTIN_INFOS
+#include "clang/Basic/riscv_andes_vector_builtins.inc"
+#undef GET_RISCVV_BUILTIN_INFOS
+};
+} // namespace RVVAndes
+
 #define GET_BUILTIN_STR_TABLE
 #include "clang/Basic/BuiltinsRISCV.inc"
 #undef GET_BUILTIN_STR_TABLE
@@ -296,6 +311,7 @@ RISCVTargetInfo::getTargetBuiltins() const {
   return {
       {&RVV::BuiltinStrings, RVV::BuiltinInfos, "__builtin_rvv_"},
       {&RVVSiFive::BuiltinStrings, RVVSiFive::BuiltinInfos, "__builtin_rvv_"},
+      {&RVVAndes::BuiltinStrings, RVVAndes::BuiltinInfos, "__builtin_rvv_"},
       {&BuiltinStrings, BuiltinInfos},
   };
 }
diff --git a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
index 3335239b0b6c2..bc13031c254a7 100644
--- a/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/RISCV.cpp
@@ -412,6 +412,9 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
 
     // SiFive Vector builtins are handled from here.
 #include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
+
+    // Andes Vector builtins are handled from here.
+#include "clang/Basic/riscv_andes_vector_builtin_cg.inc"
   }
 
   assert(ID != Intrinsic::not_intrinsic);
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
index fdfe5c08a5fa6..449feb012481f 100644
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -127,6 +127,7 @@ set(riscv_files
   riscv_crypto.h
   riscv_ntlh.h
   sifive_vector.h
+  andes_vector.h
   )
 
 set(systemz_files
diff --git a/clang/lib/Headers/andes_vector.h b/clang/lib/Headers/andes_vector.h
new file mode 100644
index 0000000000000..dc717e6d805b9
--- /dev/null
+++ b/clang/lib/Headers/andes_vector.h
@@ -0,0 +1,16 @@
+//===----- andes_vector.h - Andes Vector definitions ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _ANDES_VECTOR_H_
+#define _ANDES_VECTOR_H_
+
+#include "riscv_vector.h"
+
+#pragma clang riscv intrinsic andes_vector
+
+#endif //_ANDES_VECTOR_H_
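
The new header is deliberately thin: it pulls in riscv_vector.h for the common vector types and then relies on the pragma so that Sema declares the nds_* builtins lazily, only in translation units that ask for them. Including the header is the normal route, but the pragma can equally be issued directly; a minimal sketch:

  /* Equivalent to #include <andes_vector.h>: riscv_vector.h supplies the
     vector types, and the pragma sets DeclareAndesVectorBuiltins so the
     nds_* intrinsics become visible to name lookup on demand. */
  #include <riscv_vector.h>
  #pragma clang riscv intrinsic andes_vector

  vfloat16m1_t g(vfloat16m1_t v, float s, size_t vl) {
    return __riscv_nds_vfpmadt_vf_f16m1(v, s, vl);
  }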
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 3d46d02b72128..77b61af768993 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -4139,6 +4139,7 @@ void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
 
 // Handle '#pragma clang riscv intrinsic vector'.
 //        '#pragma clang riscv intrinsic sifive_vector'.
+//        '#pragma clang riscv intrinsic andes_vector'.
 void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
                                       PragmaIntroducer Introducer,
                                       Token &FirstToken) {
@@ -4154,10 +4155,11 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
 
   PP.Lex(Tok);
   II = Tok.getIdentifierInfo();
-  if (!II || !(II->isStr("vector") || II->isStr("sifive_vector"))) {
+  if (!II || !(II->isStr("vector") || II->isStr("sifive_vector") ||
+               II->isStr("andes_vector"))) {
     PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
         << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true
-        << "'vector' or 'sifive_vector'";
+        << "'vector', 'sifive_vector' or 'andes_vector'";
     return;
   }
 
@@ -4172,4 +4174,6 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
     Actions.RISCV().DeclareRVVBuiltins = true;
   else if (II->isStr("sifive_vector"))
     Actions.RISCV().DeclareSiFiveVectorBuiltins = true;
+  else if (II->isStr("andes_vector"))
+    Actions.RISCV().DeclareAndesVectorBuiltins = true;
 }
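
With the parser change above, an unrecognized pragma argument now lists all three accepted spellings. A hedged sketch of the behavior (the precise wording is driven by warn_pragma_invalid_argument; the updated riscv-bad-intrinsic-pragma.c test in the diffstat covers this path):

  #pragma clang riscv intrinsic andes_vector /* accepted: declares the Andes builtins */
  #pragma clang riscv intrinsic nds_vector   /* rejected: warns along the lines of
                                                "unexpected argument 'nds_vector' to
                                                '#pragma clang riscv'; expected
                                                'vector', 'sifive_vector' or
                                                'andes_vector'" */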
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 29bf274f8a39f..55428af73011c 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -946,7 +946,8 @@ bool Sema::LookupBuiltin(LookupResult &R) {
         }
       }
 
-      if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins) {
+      if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins ||
+          RISCV().DeclareAndesVectorBuiltins) {
         if (!RISCV().IntrinsicManager)
           RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(*this);
 
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index f0beedada7306..481bf8bd22cc1 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -69,6 +69,12 @@ static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
 #undef DECL_SIGNATURE_TABLE
 };
 
+static const PrototypeDescriptor RVAndesVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_andes_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
 static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
 #define DECL_INTRINSIC_RECORDS
 #include "clang/Basic/riscv_vector_builtin_sema.inc"
@@ -81,6 +87,12 @@ static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
 #undef DECL_INTRINSIC_RECORDS
 };
 
+static const RVVIntrinsicRecord RVAndesVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_andes_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
 // Get subsequence of signature table.
 static ArrayRef<PrototypeDescriptor>
 ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
@@ -89,6 +101,8 @@ ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
     return ArrayRef(&RVVSignatureTable[Index], Length);
   case IntrinsicKind::SIFIVE_VECTOR:
     return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+  case IntrinsicKind::ANDES_VECTOR:
+    return ArrayRef(&RVAndesVectorSignatureTable[Index], Length);
   }
   llvm_unreachable("Unhandled IntrinsicKind");
 }
@@ -167,6 +181,7 @@ class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
   RVVTypeCache TypeCache;
   bool ConstructedRISCVVBuiltins;
   bool ConstructedRISCVSiFiveVectorBuiltins;
+  bool ConstructedRISCVAndesVectorBuiltins;
 
   // List of all RVV intrinsic.
   std::vector<RVVIntrinsicDef> IntrinsicList;
@@ -192,6 +207,7 @@ class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
   RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
     ConstructedRISCVVBuiltins = false;
     ConstructedRISCVSiFiveVectorBuiltins = false;
+    ConstructedRISCVAndesVectorBuiltins = false;
   }
 
   // Initialize IntrinsicList
@@ -209,6 +225,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
   const TargetInfo &TI = Context.getTargetInfo();
   static const std::pair<const char *, unsigned> FeatureCheckList[] = {
       {"64bit", RVV_REQ_RV64},
+      {"xandesvpackfph", RVV_REQ_Xandesvpackfph},
       {"xsfvcp", RVV_REQ_Xsfvcp},
       {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
       {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
@@ -358,6 +375,12 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
     ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
                            IntrinsicKind::SIFIVE_VECTOR);
   }
+  if (S.RISCV().DeclareAndesVectorBuiltins &&
+      !ConstructedRISCVAndesVectorBuiltins) {
+    ConstructedRISCVAndesVectorBuiltins = true;
+    ConstructRVVIntrinsics(RVAndesVectorIntrinsicRecords,
+                           IntrinsicKind::ANDES_VECTOR);
+  }
 }
 
 // Compute name and signatures for intrinsic with practical types.
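
InitIntrinsicList above applies the same construct-once guard to each vendor table, so a TU that includes riscv_vector.h, sifive_vector.h, and andes_vector.h materializes each record set exactly once even though LookupBuiltin may re-enter initialization. A stripped-down C sketch of the pattern (names abbreviated; not the literal clang code):

  /* Construct-once guards, mirroring RISCVIntrinsicManagerImpl: each vendor's
     records are appended to the intrinsic list at most once. */
  static bool ConstructedRVV, ConstructedSiFive, ConstructedAndes;

  void init_intrinsic_list(bool rvv, bool sifive, bool andes) {
    if (rvv && !ConstructedRVV) {
      ConstructedRVV = true;      /* set before constructing */
      /* construct_intrinsics(RVVIntrinsicRecords, RVV); */
    }
    if (sifive && !ConstructedSiFive) {
      ConstructedSiFive = true;
      /* construct_intrinsics(RVSiFiveVectorIntrinsicRecords, SIFIVE_VECTOR); */
    }
    if (andes && !ConstructedAndes) {
      ConstructedAndes = true;
      /* construct_intrinsics(RVAndesVectorIntrinsicRecords, ANDES_VECTOR); */
    }
  }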
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 6378596ef31e2..d954c1617ae1a 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -1214,6 +1214,7 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, enum RVVRequire Require) {
   switch (Require) {
     STRINGIFY(RVV_REQ_RV64)
     STRINGIFY(RVV_REQ_Zvfhmin)
+    STRINGIFY(RVV_REQ_Xandesvpackfph)
     STRINGIFY(RVV_REQ_Xsfvcp)
     STRINGIFY(RVV_REQ_Xsfvfnrclipxfqf)
     STRINGIFY(RVV_REQ_Xsfvfwmaccqqq)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadb.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000000000..48e3a1b0fc295
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,225 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000000000..e435a31eb3314
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,225 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadb.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000000000..46c6648bc7a82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,225 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
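
A note on the _rm variants in the file above: the overloaded rounding-mode
intrinsics take an explicit frm argument before vl, and the checks show how it
is encoded in the IR — __RISCV_FRM_RNE lowers to an frm operand of 0, while
the plain (non-_rm) forms pass 7, i.e. the dynamic rounding mode taken from
the frm CSR. A minimal usage sketch against the overloaded API (scale_rne is a
hypothetical helper, not part of the patch):

  #include <andes_vector.h>

  // Force round-to-nearest-even for this packed multiply-add instead of
  // whatever the frm CSR currently holds.
  vfloat16m1_t scale_rne(vfloat16m1_t v, float packed_pair, size_t vl) {
    return __riscv_nds_vfpmadb(v, packed_pair, __RISCV_FRM_RNE, vl);
  }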
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000000000..62fe9b7bc10e7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,225 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
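
Both vfpmadb and vfpmadt take a 32-bit float scalar even though the vector
operands are FP16. As the packed-FP16 extension name suggests, that scalar
presumably carries two packed FP16 values, with the b/t suffix selecting the
bottom or top half; the Andes intrinsic document linked above is authoritative
on the exact layout. A hedged sketch of how such a pair could be assembled,
assuming the bottom/top halves map to the low/high 16 bits (pack_fp16_pair is
a hypothetical helper, not part of the patch):

  #include <stdint.h>
  #include <string.h>

  // Pack two FP16 values into the low and high halves of one float,
  // matching the assumed operand layout of the b/t intrinsic variants.
  static inline float pack_fp16_pair(_Float16 bottom, _Float16 top) {
    uint16_t lo, hi;
    memcpy(&lo, &bottom, sizeof(lo));
    memcpy(&hi, &top, sizeof(hi));
    uint32_t bits = ((uint32_t)hi << 16) | lo;
    float out;
    memcpy(&out, &bits, sizeof(out));
    return out;
  }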
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadb.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000000000..bda6a5292ae3d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadb.c
@@ -0,0 +1,441 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
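
Together with the non-policy files earlier in the patch, the policy file above
pins down the encoding of the trailing policy operand on the masked intrinsic:
3 for the plain _m forms (tail and mask agnostic), 2 for _tum, 1 for _mu, and
0 for _tumu, while the _tu forms call the unmasked intrinsic with maskedoff as
the passthru operand. A minimal sketch of a tail-undisturbed call, mirroring
the tests (madd_tu is a hypothetical wrapper, not part of the patch):

  #include <andes_vector.h>

  // Elements past vl keep their values from dest rather than becoming
  // agnostic, per the _tu (tail-undisturbed) policy.
  vfloat16m1_t madd_tu(vfloat16m1_t dest, vfloat16m1_t v, float s, size_t vl) {
    return __riscv_nds_vfpmadb_vf_f16m1_tu(dest, v, s, vl);
  }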
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000000000..bfcc1e18bd351
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/nds_vfpmadt.c
@@ -0,0 +1,441 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
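
A quick orientation note for reviewers on the constants in the CHECK lines
above: in the masked llvm.riscv.nds.vfpmadt.mask.* calls, the trailing i64
operand is the standard RVV policy encoding (bit 0 = tail agnostic, bit 1 =
mask agnostic), which is why the _tum tests check i64 2, the _tumu tests
check i64 0, and the _mu tests check i64 1; the i64 operand just before the
VL is the rounding mode, 7 (dynamic frm) for the plain policy variants and 0
once __RISCV_FRM_RNE is passed through. A minimal caller sketch under those
assumptions follows; the wrapper name scale_f16m1_tu is hypothetical, not
part of this patch:

  #include <andes_vector.h>

  // Hypothetical sketch: tail-undisturbed (_tu) use of the non-overloaded
  // policy intrinsic added by this patch. Elements past vl keep their
  // values from dest; the rounding mode stays dynamic (the frm operand
  // lowers to 7, as in the checks above).
  vfloat16m1_t scale_f16m1_tu(vfloat16m1_t dest, vfloat16m1_t v,
                              float s, size_t vl) {
    return __riscv_nds_vfpmadt_vf_f16m1_tu(dest, v, s, vl);
  }
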
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadb.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadb.c
new file mode 100644
index 0000000000000..c7e6abc7a7e19
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadb.c
@@ -0,0 +1,441 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
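
One more note on the overloaded policy spellings exercised above: the
overloaded intrinsics share a single name per policy
(__riscv_nds_vfpmadb_tu, _tum, _tumu, _mu), and the explicit rounding-mode
form is selected purely by arity, i.e. by passing the extra frm argument
before vl. A small sketch combining the two call shapes seen in these
tests; the wrapper name madb_rne_then_dyn is hypothetical:

  #include <andes_vector.h>

  // Hypothetical sketch: same overloaded name resolves to both forms.
  vfloat16m1_t madb_rne_then_dyn(vfloat16m1_t dest, vfloat16m1_t v,
                                 float s, size_t vl) {
    // Static rounding mode: extra frm argument, lowers with frm = 0.
    vfloat16m1_t x = __riscv_nds_vfpmadb_tu(dest, v, s,
                                            __RISCV_FRM_RNE, vl);
    // Dynamic rounding mode: no frm argument, lowers with frm = 7.
    return __riscv_nds_vfpmadb_tu(x, v, s, vl);
  }
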
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadt.c
new file mode 100644
index 0000000000000..7e351e096821e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/nds_vfpmadt.c
@@ -0,0 +1,441 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN:   -target-feature +zvfhmin \
+// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone  \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <andes_vector.h>
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tum(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
+  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
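
The CHECK lines above also document the rounding-mode convention: the _rm test variants pass an explicit frm argument (__RISCV_FRM_RNE lowers to the `i64 0` operand in the IR), while the variants without _rm pass `i64 7`, i.e. DYN, so the instruction uses whatever the frm CSR currently holds. A minimal sketch contrasting the two call shapes (the helper names are illustrative, not part of the patch):

    // Explicit rounding mode: round-to-nearest-even (frm operand = 0 in the IR).
    vfloat16m1_t madt_rne(vfloat16m1_t acc, vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadt_tu(acc, op1, op2, __RISCV_FRM_RNE, vl);
    }

    // No rounding-mode argument: the intrinsic passes frm = 7 (DYN), so the
    // result depends on the current frm CSR setting.
    vfloat16m1_t madt_dyn(vfloat16m1_t acc, vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadt_tu(acc, op1, op2, vl);
    }
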
diff --git a/clang/test/Sema/riscv-bad-intrinsic-pragma.c b/clang/test/Sema/riscv-bad-intrinsic-pragma.c
index fc8e18ff130e0..da1ff9e534a67 100644
--- a/clang/test/Sema/riscv-bad-intrinsic-pragma.c
+++ b/clang/test/Sema/riscv-bad-intrinsic-pragma.c
@@ -2,7 +2,7 @@
 // RUN:            2>&1 | FileCheck %s
 
 #pragma clang riscv intrinsic vvvv
-// CHECK:      warning: unexpected argument 'vvvv' to '#pragma riscv'; expected 'vector' or 'sifive_vector' [-Wignored-pragmas]
+// CHECK:      warning: unexpected argument 'vvvv' to '#pragma riscv'; expected 'vector', 'sifive_vector' or 'andes_vector' [-Wignored-pragmas]
 
 #pragma clang riscv what + 3241
 // CHECK:      warning: unexpected argument 'what' to '#pragma riscv'; expected 'intrinsic' [-Wignored-pragmas]
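
For reference, the three pragma spellings accepted after this change are shown below; the new andes_vector argument is what exposes the intrinsics implemented in this patch (a usage sketch, not part of the test file):

    #pragma clang riscv intrinsic vector        // standard RVV intrinsics
    #pragma clang riscv intrinsic sifive_vector // SiFive vendor intrinsics
    #pragma clang riscv intrinsic andes_vector  // Andes vendor intrinsics (new)
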
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 8d94ec3d920d0..2eae4fd07614b 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -774,6 +774,7 @@ void RVVEmitter::createRVVIntrinsics(
           StringSwitch<RVVRequire>(RequiredFeature)
               .Case("RV64", RVV_REQ_RV64)
               .Case("Zvfhmin", RVV_REQ_Zvfhmin)
+              .Case("Xandesvpackfph", RVV_REQ_Xandesvpackfph)
               .Case("Xsfvcp", RVV_REQ_Xsfvcp)
               .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
               .Case("Xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq)
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
index a2c6f002f7359..8539bdfbb1245 100644
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -109,6 +109,9 @@ enum ActionType {
   GenRISCVSiFiveVectorBuiltins,
   GenRISCVSiFiveVectorBuiltinCG,
   GenRISCVSiFiveVectorBuiltinSema,
+  GenRISCVAndesVectorBuiltins,
+  GenRISCVAndesVectorBuiltinCG,
+  GenRISCVAndesVectorBuiltinSema,
   GenAttrDocs,
   GenDiagDocs,
   GenOptDocs,
@@ -314,6 +317,15 @@ cl::opt<ActionType> Action(
         clEnumValN(GenRISCVSiFiveVectorBuiltinSema,
                    "gen-riscv-sifive-vector-builtin-sema",
                    "Generate riscv_sifive_vector_builtin_sema.inc for clang"),
+        clEnumValN(GenRISCVAndesVectorBuiltins,
+                   "gen-riscv-andes-vector-builtins",
+                   "Generate riscv_andes_vector_builtins.inc for clang"),
+        clEnumValN(GenRISCVAndesVectorBuiltinCG,
+                   "gen-riscv-andes-vector-builtin-codegen",
+                   "Generate riscv_andes_vector_builtin_cg.inc for clang"),
+        clEnumValN(GenRISCVAndesVectorBuiltinSema,
+                   "gen-riscv-andes-vector-builtin-sema",
+                   "Generate riscv_andes_vector_builtin_sema.inc for clang"),
         clEnumValN(GenAttrDocs, "gen-attr-docs",
                    "Generate attribute documentation"),
         clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -593,6 +605,15 @@ bool ClangTableGenMain(raw_ostream &OS, const RecordKeeper &Records) {
   case GenRISCVSiFiveVectorBuiltinSema:
     EmitRVVBuiltinSema(Records, OS);
     break;
+  case GenRISCVAndesVectorBuiltins:
+    EmitRVVBuiltins(Records, OS);
+    break;
+  case GenRISCVAndesVectorBuiltinCG:
+    EmitRVVBuiltinCG(Records, OS);
+    break;
+  case GenRISCVAndesVectorBuiltinSema:
+    EmitRVVBuiltinSema(Records, OS);
+    break;
   case GenAttrDocs:
     EmitClangAttrDocs(Records, OS);
     break;
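
Taken together, the TableGen changes above add three emitter actions that reuse the existing RVV emitters to produce riscv_andes_vector_builtins.inc, riscv_andes_vector_builtin_cg.inc and riscv_andes_vector_builtin_sema.inc from the new riscv_andes_vector.td, via the matching -gen-riscv-andes-vector-* flags. An end-to-end sketch of what user code looks like once the patch lands (the exact -march string is an assumption; the intrinsic call shapes follow the tests above):

    // Compile with something like:
    //   clang --target=riscv64 -march=rv64gcv_zvfh_xandesvpackfph -O2 ...
    #include <andes_vector.h>

    // Masked, tail-undisturbed (_tum) form: tail elements are taken from
    // maskedoff, and active elements receive the packed FP16 multiply-add
    // result computed by nds.vfpmadt.
    vfloat16m1_t foo(vbool16_t mask, vfloat16m1_t maskedoff,
                     vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
    }
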


