[llvm] 7648207 - [RISCV][POC] Model frm control for vfadd

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jul 13 00:34:12 PDT 2023


Author: eopXD
Date: 2023-07-13T00:34:00-07:00
New Revision: 76482078cdd2dca1f553904a1d2092626ba12856

URL: https://github.com/llvm/llvm-project/commit/76482078cdd2dca1f553904a1d2092626ba12856
DIFF: https://github.com/llvm/llvm-project/commit/76482078cdd2dca1f553904a1d2092626ba12856.diff

LOG: [RISCV][POC] Model frm control for vfadd

Depends on D152879.

Specification PR: riscv-non-isa/rvv-intrinsic-doc#226

This patch adds a variant of `vfadd` that models rounding mode control.
The added variant carries the suffix `_rm` to differentiate it from the
existing ones, which do not alter `frm` and use whatever value it
currently holds.

The value `7` is used to indicate that no rounding mode change is
needed, reusing the semantics of the rounding mode encoding for scalar
floating-point instructions, where `7` selects the dynamic rounding
mode.
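
For illustration, a minimal sketch of how the two variants differ at a
call site, assuming the intrinsic names from the specification PR
(`__RISCV_FRM_RNE` comes from the header enum added in this patch):

  #include <riscv_vector.h>

  // New _rm variant: the rounding mode is an explicit constant argument.
  vfloat32m1_t add_rne(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
    return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
  }

  // Existing variant: unchanged, uses whatever `frm` currently holds.
  vfloat32m1_t add_dyn(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
    return __riscv_vfadd_vv_f32m1(op1, op2, vl);
  }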

An additional data member, `HasFRMRoundModeOp`, is added so we can
append the `_rm` suffix for the `vfadd` variants that model rounding
mode control.

An additional data member, `IsRVVFixedPoint`, is added so we can define
pseudo instructions with a rounding mode operand and distinguish
between fixed-point and floating-point instructions.
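
The `_rm` variants also compose with the existing policy suffixes,
which shifts the position of the `frm` argument; a hedged sketch of the
resulting call shapes for the f32m1 instances, matching the
constant-argument range checks added to SemaChecking below:

  #include <riscv_vector.h>

  void frm_positions(vbool32_t m, vfloat32m1_t pt, vfloat32m1_t a,
                     vfloat32m1_t b, size_t vl) {
    // Non-policy: frm is argument 2.
    __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RTZ, vl);
    // _tu and plain masked (_m) prepend one operand: frm is argument 3.
    __riscv_vfadd_vv_f32m1_rm_tu(pt, a, b, __RISCV_FRM_RTZ, vl);
    __riscv_vfadd_vv_f32m1_rm_m(m, a, b, __RISCV_FRM_RTZ, vl);
    // _tum, _tumu and _mu take both mask and maskedoff: frm is argument 4.
    __riscv_vfadd_vv_f32m1_rm_tumu(m, pt, a, b, __RISCV_FRM_RTZ, vl);
  }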

Reviewed By: craig.topper, kito-cheng

Differential Revision: https://reviews.llvm.org/D152996

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/include/clang/Basic/riscv_vector_common.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Sema/SemaChecking.cpp
    clang/lib/Sema/SemaRISCVVectorLookup.cpp
    clang/lib/Support/RISCVVIntrinsicUtils.cpp
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
    clang/utils/TableGen/RISCVVEmitter.cpp
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
    llvm/lib/Target/RISCV/RISCVInstrFormats.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
    llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
    llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
    llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
    llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
    llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 09f518cc54772f..f23d9edc9d96a5 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -226,6 +226,11 @@ multiclass RVVFloatingBinBuiltinSet
                           [["vv", "v", "vvv"],
                            ["vf", "v", "vve"]]>;
 
+multiclass RVVFloatingBinBuiltinSetRoundingMode
+    : RVVOutOp1BuiltinSet<NAME, "xfd",
+                          [["vv", "v", "vvvu"],
+                           ["vf", "v", "vveu"]]>;
+
 multiclass RVVFloatingBinVFBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "xfd",
                           [["vf", "v", "vve"]]>;
@@ -1856,10 +1861,72 @@ let ManualCodegen = [{
   defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
   defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
 }
+}
 
 // 14. Vector Floating-Point Instructions
+let HeaderCode =
+[{
+enum __RISCV_FRM {
+  __RISCV_FRM_RNE = 0,
+  __RISCV_FRM_RTZ = 1,
+  __RISCV_FRM_RDN = 2,
+  __RISCV_FRM_RUP = 3,
+  __RISCV_FRM_RMM = 4,
+};
+}] in def frm_enum : RVVHeader;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd  : RVVFloatingBinBuiltinSet;
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, op0, op1, round_mode, vl)
+    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+                      Operands.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  let HasFRMRoundModeOp = true in {
+    defm vfadd  : RVVFloatingBinBuiltinSetRoundingMode;
+  }
+  defm vfadd  : RVVFloatingBinBuiltinSet;
+}
 defm vfsub  : RVVFloatingBinBuiltinSet;
 defm vfrsub : RVVFloatingBinVFBuiltinSet;
 

diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index f98432839f1d28..e276e4c3c409b2 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -234,6 +234,10 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
 
   // Set to true if the builtin is associated with tuple types.
   bit IsTuple = false;
+
+  // Set to true if the builtin has a parameter that models floating-point
+  // rounding mode control
+  bit HasFRMRoundModeOp = false;
 }
 
 // This is the code emitted in the header.

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 2f78c0903f2d1f..1c216b4ee53714 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -387,6 +387,7 @@ class RVVIntrinsic {
   std::vector<int64_t> IntrinsicTypes;
   unsigned NF = 1;
   Policy PolicyAttrs;
+  bool HasFRMRoundModeOp;
 
 public:
   RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
@@ -397,7 +398,7 @@ class RVVIntrinsic {
                const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<llvm::StringRef> &RequiredFeatures,
-               unsigned NF, Policy PolicyAttrs);
+               unsigned NF, Policy PolicyAttrs, bool HasFRMRoundModeOp);
   ~RVVIntrinsic() = default;
 
   RVVTypePtr getOutputType() const { return OutputType; }
@@ -467,7 +468,7 @@ class RVVIntrinsic {
   static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
                                    std::string &Name, std::string &BuiltinName,
                                    std::string &OverloadedName,
-                                   Policy &PolicyAttrs);
+                                   Policy &PolicyAttrs, bool HasFRMRoundModeOp);
 };
 
 // RVVRequire should be sync'ed with target features, but only
@@ -526,6 +527,7 @@ struct RVVIntrinsicRecord {
   bool HasMaskedOffOperand : 1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool HasFRMRoundModeOp : 1;
   bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 3284cf1b4f91be..7f0fed46083a1d 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -4808,6 +4808,21 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
   case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
   case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
     return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tama:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tama:
+    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+  case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
   case RISCV::BI__builtin_riscv_ntl_load:
   case RISCV::BI__builtin_riscv_ntl_store:
     DeclRefExpr *DRE =

diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index fc3086c7d78f06..fbed6e8d26cbc3 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -362,7 +362,8 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
   std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
 
   RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
-                                     OverloadedName, PolicyAttrs);
+                                     OverloadedName, PolicyAttrs,
+                                     Record.HasFRMRoundModeOp);
 
   // Put into IntrinsicList.
   size_t Index = IntrinsicList.size();

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index cba4ab7c675886..12b9ca38faa0fe 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -875,20 +875,19 @@ std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
 //===----------------------------------------------------------------------===//
 // RVVIntrinsic implementation
 //===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
-                           StringRef NewOverloadedName,
-                           StringRef OverloadedSuffix, StringRef IRName,
-                           bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
-                           PolicyScheme Scheme, bool SupportOverloading,
-                           bool HasBuiltinAlias, StringRef ManualCodegen,
-                           const RVVTypes &OutInTypes,
-                           const std::vector<int64_t> &NewIntrinsicTypes,
-                           const std::vector<StringRef> &RequiredFeatures,
-                           unsigned NF, Policy NewPolicyAttrs)
+RVVIntrinsic::RVVIntrinsic(
+    StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+    StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+    bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+    bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
+    const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+    const std::vector<StringRef> &RequiredFeatures, unsigned NF,
+    Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
     : IRName(IRName), IsMasked(IsMasked),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
       SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
-      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+      ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+      HasFRMRoundModeOp(HasFRMRoundModeOp) {
 
   // Init BuiltinName, Name and OverloadedName
   BuiltinName = NewName.str();
@@ -903,7 +902,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
     OverloadedName += "_" + OverloadedSuffix.str();
 
   updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
-                       PolicyAttrs);
+                       PolicyAttrs, HasFRMRoundModeOp);
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1045,11 +1044,9 @@ RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
                    "and mask policy");
 }
 
-void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
-                                        std::string &Name,
-                                        std::string &BuiltinName,
-                                        std::string &OverloadedName,
-                                        Policy &PolicyAttrs) {
+void RVVIntrinsic::updateNamesAndPolicy(
+    bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
+    std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
 
   auto appendPolicySuffix = [&](const std::string &suffix) {
     Name += suffix;
@@ -1062,6 +1059,11 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
   Name = "__riscv_" + Name;
   OverloadedName = "__riscv_" + OverloadedName;
 
+  if (HasFRMRoundModeOp) {
+    Name += "_rm";
+    BuiltinName += "_rm";
+  }
+
   if (IsMasked) {
     if (PolicyAttrs.isTUMUPolicy())
       appendPolicySuffix("_tumu");
@@ -1131,6 +1133,7 @@ raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
   OS << (int)Record.HasMaskedOffOperand << ",";
   OS << (int)Record.HasTailPolicy << ",";
   OS << (int)Record.HasMaskPolicy << ",";
+  OS << (int)Record.HasFRMRoundModeOp << ",";
   OS << (int)Record.IsTuple << ",";
   OS << (int)Record.UnMaskedPolicyScheme << ",";
   OS << (int)Record.MaskedPolicyScheme << ",";

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
index d9ec3d8a2cf77a..339325b958db42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_m
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_m
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
@@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
@@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
@@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
@@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
   return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+

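For reference, here is a minimal usage sketch of the two flavors exercised by the tests above: the pre-existing intrinsics that leave frm untouched, and the new _rm variants that take an explicit rounding-mode argument. This is an illustration, not part of the patch; the masked _rm_m call is copied verbatim from test_vfadd_vv_f64m2_rm_m, and it is assumed (as the tests imply) that riscv_vector.h provides the __RISCV_FRM_* enumerators.

#include <stddef.h>
#include <riscv_vector.h>

// Leaves frm alone: lowered with the "no rounding mode change" frm
// operand value 7, so the add rounds per whatever frm already holds.
vfloat64m2_t add_dyn(vfloat64m2_t a, vfloat64m2_t b, size_t vl) {
  return __riscv_vfadd(a, b, vl);
}

// Explicit rounding mode: __RISCV_FRM_RNE (encoding 0) is passed through
// as the frm operand, matching the "i64 0" seen in the checks above.
vfloat64m2_t add_rne(vfloat64m2_t a, vfloat64m2_t b, size_t vl) {
  return __riscv_vfadd(a, b, __RISCV_FRM_RNE, vl);
}

// Masked form with an explicit rounding mode, spelled with the
// non-overloaded _rm_m name.
vfloat64m2_t add_rne_m(vbool32_t m, vfloat64m2_t a, vfloat64m2_t b,
                       size_t vl) {
  return __riscv_vfadd_vv_f64m2_rm_m(m, a, b, __RISCV_FRM_RNE, vl);
}

Both spellings resolve through the same overloaded name; the argument count selects between the implicit-frm and explicit-frm forms, as the overloaded tests below demonstrate.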
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
index 4de415e21b99ef..ad850f48c7d6c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, _Float16
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, _Float16
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_m
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_m
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t op1, float op
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
@@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
@@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, s
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_m
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, s
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_m
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
@@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_m
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
@@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_m
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2,
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -600,10 +600,610 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_m
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
   return __riscv_vfadd(mask, op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm(vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm(vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm(vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm(vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm(vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm(vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm(vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm(vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm(vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm(vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm(vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm(vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm(vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_m(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd(mask, op1, op2, __RISCV_FRM_RNE, vl);
+}
+

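For reference, the checks above exercise the two overload shapes this patch adds: the `_rm` variants take an explicit rounding-mode argument between the last data operand and `vl`, and the `_rm_m` variants additionally take a mask as the first argument. In the generated IR the mode travels as an extra `i64` immediate before `vl` (`0` for `__RISCV_FRM_RNE` in these tests; `7` meaning "leave `frm` unchanged" for the pre-existing variants). A minimal usage sketch, assuming a toolchain with this patch and the `__RISCV_FRM_*` enumerators from D152879 (the helper names below are illustrative, not part of the patch):

#include <riscv_vector.h>

// Unmasked: add with round-to-nearest-even, then once more using the
// ambient frm. The first call lowers to the vfadd intrinsic with the
// frm operand set to 0 (RNE); the second carries 7 (frm untouched).
vfloat32m1_t add_rne_then_dynamic(vfloat32m1_t a, vfloat32m1_t b,
                                  size_t vl) {
  vfloat32m1_t t = __riscv_vfadd(a, b, __RISCV_FRM_RNE, vl); // _rm variant
  return __riscv_vfadd(t, b, vl);                            // existing variant
}

// Masked: the mask comes first; the rounding mode still precedes vl.
vfloat32m1_t add_rne_masked(vbool32_t mask, vfloat32m1_t a,
                            vfloat32m1_t b, size_t vl) {
  return __riscv_vfadd(mask, a, b, __RISCV_FRM_RNE, vl);     // _rm_m variant
}
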
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
index 27e807f6252ec0..343c8c098c9b8e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tu
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tu
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tu
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tu
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tu
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tu
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tu
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tu
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tu
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tu
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tu
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tu
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tu
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tu
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tu
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tu
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
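
From this point the masked policy variants are updated the same way: the new
`i64 7` frm operand lands between the mask operand and VL, while the trailing
policy operand is untouched (`i64 2`, tail undisturbed / mask agnostic, for
the `_tum` tests here, and `i64 0`, tail undisturbed / mask undisturbed, for
the `_tumu` tests further down). In C terms, assuming the masked `_rm`
variants take the rounding mode just before `vl` like the unmasked ones:

  // Hedged sketch of a masked, tail-undisturbed (_tum) variant.
  vfloat32m1_t add_rne_tum(vbool32_t m, vfloat32m1_t dst,
                           vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
    return __riscv_vfadd_vv_f32m1_rm_tum(m, dst, a, b, 0, vl);
  }
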
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tum
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tum
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -610,7 +610,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -630,7 +630,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -650,7 +650,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -660,7 +660,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -670,7 +670,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -680,7 +680,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -690,7 +690,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -710,7 +710,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tumu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tumu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -730,7 +730,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -740,7 +740,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -750,7 +750,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -760,7 +760,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -770,7 +770,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -790,7 +790,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -800,7 +800,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -810,7 +810,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -820,7 +820,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -830,7 +830,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -840,7 +840,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -860,7 +860,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -870,7 +870,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -880,7 +880,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -890,7 +890,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -900,7 +900,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -910,7 +910,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -920,7 +920,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -930,7 +930,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -950,7 +950,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -970,7 +970,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -980,7 +980,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -990,7 +990,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1000,7 +1000,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1010,7 +1010,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_mu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1020,7 +1020,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_mu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1030,7 +1030,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -1060,7 +1060,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -1070,7 +1070,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -1080,7 +1080,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -1090,7 +1090,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -1100,7 +1100,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -1110,7 +1110,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -1120,7 +1120,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -1130,7 +1130,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -1150,7 +1150,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -1160,7 +1160,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -1170,7 +1170,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -1180,7 +1180,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -1190,7 +1190,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -1200,10 +1200,1210 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
   return __riscv_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl);
 }
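
The updated CHECK lines above all follow one pattern: every masked
`llvm.riscv.vfadd.mask.*` call gains an `i64 7` operand immediately before
the VL operand, while the trailing policy operand (`i64 0` in the `_tumu`
tests, `i64 1` in the `_mu` tests) is unchanged. Encoding `7` is FRM_DYN,
so the existing non-`_rm` intrinsics keep rounding with whatever the `frm`
CSR holds at execution time. A minimal sketch of that unchanged behavior
follows; the helper is hypothetical and not part of this patch, and it
assumes a C library where `fesetround()` writes `frm` on RISC-V, as common
implementations do:

#include <fenv.h>
#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical helper, not from this patch: the plain (non-_rm) intrinsic
// still uses the dynamic rounding mode, i.e. whatever frm currently holds.
vfloat32m1_t add_round_down(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  fesetround(FE_DOWNWARD);                  // writes frm = RDN through fcsr
  return __riscv_vfadd_vv_f32m1(a, b, vl);  // lowered with frm operand 7 (DYN)
}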
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
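+
+// The _rm tests that begin here all share this shape, so one note covers
+// them: the extra rounding-mode argument is an integer constant that the
+// frontend folds directly into the frm operand of the IR call. Here
+// __RISCV_FRM_RNE (encoding 0) becomes the `i64 0` before [[VL]], and
+// passing `maskedoff` as the first intrinsic operand realizes the
+// tail-undisturbed `_tu` policy. A hypothetical usage sketch with another
+// encoding, assuming the f64m1 variant and the __RISCV_FRM_RTZ enumerator
+// are generated by the same scheme (the __RISCV_FRM_* values mirror the
+// scalar frm encodings: RNE=0, RTZ=1, RDN=2, RUP=3, RMM=4):
+//
+//   // Hypothetical usage, not taken from this diff: tail-undisturbed add
+//   // rounded toward zero; the backend is expected to save and restore
+//   // frm around the vfadd rather than leave it modified for later code.
+//   vfloat64m1_t add_rtz_tu(vfloat64m1_t tail, vfloat64m1_t a,
+//                           vfloat64m1_t b, size_t vl) {
+//     return __riscv_vfadd_vv_f64m1_rm_tu(tail, a, b, __RISCV_FRM_RTZ, vl);
+//   }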
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_vf_f32m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_vf_f64m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+

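(Aside for readers of the patch, not part of the diff: a minimal usage sketch of the new `_rm` policy variants exercised by the tests above. It assumes a toolchain whose <riscv_vector.h> includes this change; the wrapper name `vfadd_f32m1_rne` is hypothetical.)

#include <riscv_vector.h>
#include <stddef.h>

// Minimal sketch, not part of the patch. The wrapper name vfadd_f32m1_rne is
// hypothetical; the intrinsic and the __RISCV_FRM_RNE rounding-mode value are
// the ones exercised by the tests above.
vfloat32m1_t vfadd_f32m1_rne(vbool32_t mask, vfloat32m1_t maskedoff,
                             vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  // The explicit frm argument is lowered to the immediate rounding-mode
  // operand of llvm.riscv.vfadd.mask (0 == RNE); the trailing policy
  // operand selects the tail/mask policy (0 for tumu, 1 for mu above).
  return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2,
                                        __RISCV_FRM_RNE, vl);
}

The non-`_rm` intrinsics in the following diff keep their C signature unchanged; as the updated CHECK lines show, the frontend now passes the rounding-mode value 7 (dynamic, i.e. use the current frm) as an extra `i64 7` operand to the intrinsic.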
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
index 7e140beb762506..025f773c27fc1a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfadd.c
@@ -10,7 +10,7 @@
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -20,7 +20,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tu
 // CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -30,7 +30,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -40,7 +40,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -50,7 +50,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -60,7 +60,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tu
 // CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -70,7 +70,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -80,7 +80,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tu
 // CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -90,7 +90,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -100,7 +100,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tu
 // CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -110,7 +110,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -120,7 +120,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tu
 // CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -130,7 +130,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _F
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tu
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -140,7 +140,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tu
 // CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -150,7 +150,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tu
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -160,7 +160,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tu
 // CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -170,7 +170,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tu
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -180,7 +180,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tu
 // CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -190,7 +190,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tu
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -200,7 +200,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tu
 // CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -210,7 +210,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tu
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -220,7 +220,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tu
 // CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -230,7 +230,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, fl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tu
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -240,7 +240,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tu
 // CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -250,7 +250,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -260,7 +260,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tu
 // CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -270,7 +270,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tu
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tu
 // CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -290,7 +290,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tu
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -300,7 +300,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tu
 // CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -310,7 +310,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, do
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -320,7 +320,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -330,7 +330,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -340,7 +340,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -350,7 +350,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -360,7 +360,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -370,7 +370,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -380,7 +380,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -390,7 +390,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -400,7 +400,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -410,7 +410,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tum
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -420,7 +420,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tum
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -430,7 +430,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -440,7 +440,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -450,7 +450,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -460,7 +460,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -470,7 +470,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -480,7 +480,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -490,7 +490,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -500,7 +500,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -510,7 +510,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -520,7 +520,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tum
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tum
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -550,7 +550,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -560,7 +560,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tum
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -570,7 +570,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -580,7 +580,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tum
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -590,7 +590,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -600,7 +600,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tum
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -610,7 +610,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -620,7 +620,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -630,7 +630,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -640,7 +640,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -650,7 +650,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -660,7 +660,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -670,7 +670,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -680,7 +680,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -690,7 +690,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -700,7 +700,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -710,7 +710,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_tumu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -720,7 +720,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_tumu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -730,7 +730,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -740,7 +740,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -750,7 +750,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -760,7 +760,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -770,7 +770,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -780,7 +780,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -790,7 +790,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -800,7 +800,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -810,7 +810,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -820,7 +820,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_tumu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -830,7 +830,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -840,7 +840,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_tumu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -850,7 +850,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -860,7 +860,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_tumu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -870,7 +870,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -880,7 +880,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_tumu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -890,7 +890,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -900,7 +900,7 @@ vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_tumu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
@@ -910,7 +910,7 @@ vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
@@ -920,7 +920,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
@@ -930,7 +930,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
@@ -940,7 +940,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
@@ -950,7 +950,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
@@ -970,7 +970,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
@@ -980,7 +980,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
@@ -990,7 +990,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
@@ -1000,7 +1000,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
@@ -1010,7 +1010,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_mu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
@@ -1020,7 +1020,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_mu
 // CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
@@ -1030,7 +1030,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
@@ -1040,7 +1040,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, v
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
@@ -1060,7 +1060,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
@@ -1070,7 +1070,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
@@ -1080,7 +1080,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
@@ -1090,7 +1090,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -1100,7 +1100,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
@@ -1110,7 +1110,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -1120,7 +1120,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_mu
 // CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
@@ -1130,7 +1130,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_mu
 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
@@ -1150,7 +1150,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
@@ -1160,7 +1160,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_mu
 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
@@ -1170,7 +1170,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
@@ -1180,7 +1180,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_mu
 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
@@ -1190,7 +1190,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vflo
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -1200,10 +1200,1210 @@ vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloa
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_mu
 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
   return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
 }
 
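(The remainder of this hunk adds the autogenerated tests for the new `_rm` intrinsic variants. As a minimal usage sketch, not part of the patch itself: the `_rm` overloads take an explicit rounding-mode argument immediately before `vl`, while the pre-existing overloads keep rounding with whatever the `frm` CSR holds. The function names `add_rne_tu`/`add_dyn_tu` below are illustrative only; the intrinsic calls and the `__RISCV_FRM_RNE` constant are taken from the tests that follow.)

#include <stddef.h>
#include <riscv_vector.h>

// Tail-undisturbed vfadd with a static rounding mode (RNE, frm encoding 0).
// As the CHECK lines below show, this lowers to llvm.riscv.vfadd.* with a
// rounding-mode operand of 0 before the vl operand.
vfloat32m1_t add_rne_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
                        vfloat32m1_t op2, size_t vl) {
  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// The same operation through the pre-existing overload, without the frm
// argument: the intrinsic carries the value 7 (dynamic) and rounds with the
// current contents of frm, as in the modified hunks above.
vfloat32m1_t add_dyn_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1,
                        vfloat32m1_t op2, size_t vl) {
  return __riscv_vfadd_tu(maskedoff, op1, op2, vl);
}
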
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vv_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], <vscale x 1 x half> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfadd_vf_f16mf4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x half> [[OP1]], half [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vv_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], <vscale x 2 x half> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfadd_vf_f16mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x half> [[OP1]], half [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vv_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], <vscale x 4 x half> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfadd_vf_f16m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x half> [[OP1]], half [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vv_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], <vscale x 8 x half> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfadd_vf_f16m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x half> [[OP1]], half [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vv_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], <vscale x 16 x half> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfadd_vf_f16m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x half> [[OP1]], half [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vv_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], <vscale x 32 x half> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfadd_vf_f16m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x half> [[OP1]], half [[OP2]], <vscale x 32 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vv_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], <vscale x 1 x float> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfadd_vf_f32mf2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x float> [[OP1]], float [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vv_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], <vscale x 2 x float> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfadd_vf_f32m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x float> [[OP1]], float [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vv_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], <vscale x 4 x float> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfadd_vf_f32m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x float> [[OP1]], float [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vv_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], <vscale x 8 x float> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfadd_vf_f32m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x float> [[OP1]], float [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vv_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], <vscale x 16 x float> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfadd_vf_f32m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x float> [[OP1]], float [[OP2]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vv_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], <vscale x 1 x double> [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfadd_vf_f64m1_rm_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x double> [[OP1]], double [[OP2]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vv_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], <vscale x 2 x double> [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfadd_vf_f64m2_rm_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x double> [[OP1]], double [[OP2]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vv_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], <vscale x 4 x double> [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfadd_vf_f64m4_rm_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x double> [[OP1]], double [[OP2]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vv_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], <vscale x 8 x double> [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfadd_vf_f64m8_rm_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x double> [[OP1]], double [[OP2]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
+}
+

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c
new file mode 100644
index 00000000000000..1c1fbbe19eba31
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-handcrafted/vfadd-out-of-range.c
@@ -0,0 +1,67 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
+// RUN:   -fsyntax-only -verify %s
+
+#include <riscv_vector.h>
+
+vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm(op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm(op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm_m(mask, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm_m(mask, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm_tu(maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vv_f32m1_tum(
+  vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm_tum(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm_tumu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vv_f32m1_rm_mu(mask, maskedoff, op1, op2, 5, vl);
+}
+
+vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  // expected-error@+1 {{argument value 5 is outside the valid range [0, 4]}}
+  return __riscv_vfadd_vf_f32m1_rm_mu(mask, maskedoff, op1, op2, 5, vl);
+}
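
A minimal valid-usage sketch for contrast with the diagnostics above (the
wrapper name below is illustrative, not part of the patch): any rounding-mode
immediate in [0, 4] passes the range check, e.g. __RISCV_FRM_RNE.

#include <riscv_vector.h>

// Illustrative wrapper: 0 (__RISCV_FRM_RNE) is within the accepted [0, 4].
vfloat32m1_t add_rne(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return __riscv_vfadd_vv_f32m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
}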

diff  --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index f7ef8c5d167fe6..a9349f1fdc1c75 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -65,6 +65,7 @@ struct SemaRecord {
   bool HasMaskedOffOperand :1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool HasFRMRoundModeOp : 1;
   bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;
@@ -530,6 +531,7 @@ void RVVEmitter::createRVVIntrinsics(
     StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
     unsigned NF = R->getValueAsInt("NF");
     bool IsTuple = R->getValueAsBit("IsTuple");
+    bool HasFRMRoundModeOp = R->getValueAsBit("HasFRMRoundModeOp");
 
     const Policy DefaultPolicy;
     SmallVector<Policy> SupportedUnMaskedPolicies =
@@ -577,7 +579,7 @@ void RVVEmitter::createRVVIntrinsics(
             /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
             UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
             ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF,
-            DefaultPolicy));
+            DefaultPolicy, HasFRMRoundModeOp));
         if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
           for (auto P : SupportedUnMaskedPolicies) {
             SmallVector<PrototypeDescriptor> PolicyPrototype =
@@ -592,7 +594,7 @@ void RVVEmitter::createRVVIntrinsics(
                 /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
                 UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
                 ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures,
-                NF, P));
+                NF, P, HasFRMRoundModeOp));
           }
         if (!HasMasked)
           continue;
@@ -603,7 +605,8 @@ void RVVEmitter::createRVVIntrinsics(
             Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
             /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
             SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
-            IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy));
+            IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy,
+            HasFRMRoundModeOp));
         if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
           continue;
         for (auto P : SupportedMaskedPolicies) {
@@ -618,7 +621,7 @@ void RVVEmitter::createRVVIntrinsics(
               MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
               MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
               ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
-              P));
+              P, HasFRMRoundModeOp));
         }
       } // End for Log2LMULList
     }   // End for TypeRange
@@ -671,6 +674,7 @@ void RVVEmitter::createRVVIntrinsics(
     SR.Suffix = parsePrototypes(SuffixProto);
     SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
     SR.IsTuple = IsTuple;
+    SR.HasFRMRoundModeOp = HasFRMRoundModeOp;
 
     SemaRecords->push_back(SR);
   }
@@ -713,6 +717,7 @@ void RVVEmitter::createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
     R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
     R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
     R.IsTuple = SR.IsTuple;
+    R.HasFRMRoundModeOp = SR.HasFRMRoundModeOp;
 
     assert(R.PrototypeIndex !=
            static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));

diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index a9cd18dd91ce10..544f5467b2793b 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -420,6 +420,27 @@ let TargetPrefix = "riscv" in {
     let ScalarOperand = 2;
     let VLOperand = 4;
   }
+  // For destination vector type is the same as first source vector.
+  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
+  class RISCVBinaryAAXUnMaskedRoundingMode
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     llvm_anyint_ty, LLVMMatchType<2>],
+                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 4;
+  }
+  // For destination vector type is the same as first source vector (with mask).
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+  class RISCVBinaryAAXMaskedRoundingMode
+       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<2>, LLVMMatchType<2>],
+                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 5;
+  }
   // For destination vector type is the same as first source vector. The
   // second source operand must match the destination type or be an XLen scalar.
   // Input: (passthru, vector_in, vector_in/scalar_in, vl)
@@ -1088,6 +1109,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
   }
+  multiclass RISCVBinaryAAXRoundingMode {
+    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
+  }
   // Like RISCVBinaryAAX, but the second operand is used a shift amount so it
   // must be a vector or an XLen scalar.
   multiclass RISCVBinaryAAShift {
@@ -1296,7 +1321,7 @@ let TargetPrefix = "riscv" in {
   defm vwmaccus : RISCVTernaryWide;
   defm vwmaccsu : RISCVTernaryWide;
 
-  defm vfadd : RISCVBinaryAAX;
+  defm vfadd : RISCVBinaryAAXRoundingMode;
   defm vfsub : RISCVBinaryAAX;
   defm vfrsub : RISCVBinaryAAX;
 

diff  --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 0584b4835f1b39..7269f95a4816d0 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -110,6 +110,9 @@ enum {
 
   HasRoundModeOpShift = IsSignExtendingOpWShift + 1,
   HasRoundModeOpMask = 1 << HasRoundModeOpShift,
+
+  UsesVXRMShift = HasRoundModeOpShift + 1,
+  UsesVXRMMask = 1 << UsesVXRMShift,
 };
 
 enum VLMUL : uint8_t {
@@ -172,6 +175,9 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
   return TSFlags & HasRoundModeOpMask;
 }
 
+/// \returns true if this instruction uses vxrm
+static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
+
 static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
   const uint64_t TSFlags = Desc.TSFlags;
   // This method is only called if we expect to have a VL operand, and all

diff  --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 03191f704f2ddc..f7844a2facb914 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVISelDAGToDAG.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "MCTargetDesc/RISCVMatInt.h"
 #include "RISCVISelLowering.h"
@@ -3336,8 +3337,21 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
     Ops.append(True->op_begin() + TrueVLIndex + 3, True->op_end());
   } else {
     Ops.push_back(False);
-    Ops.append(True->op_begin() + HasTiedDest, True->op_begin() + TrueVLIndex);
-    Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    if (RISCVII::hasRoundModeOp(TrueTSFlags)) {
+      // For an unmasked "VOp" with a rounding mode operand, the operand list
+      // is (..., rm, vl) or (..., rm, vl, policy).
+      // Its masked version is (..., vm, rm, vl, policy).
+      // See the rounding mode pseudos in RISCVInstrInfoVPseudos.td.
+      SDValue RoundMode = True->getOperand(TrueVLIndex - 1);
+      Ops.append(True->op_begin() + HasTiedDest,
+                 True->op_begin() + TrueVLIndex - 1);
+      Ops.append(
+          {Mask, RoundMode, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    } else {
+      Ops.append(True->op_begin() + HasTiedDest,
+                 True->op_begin() + TrueVLIndex);
+      Ops.append({Mask, VL, /* SEW */ True.getOperand(TrueVLIndex + 1)});
+    }
     Ops.push_back(PolicyOp);
 
     // Result node should have chain operand of True.

diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e98e37d9347065..d790a4abd0cc57 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14148,8 +14148,33 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   }
 }
 
+// Returns the index of the rounding mode immediate operand if any; otherwise
+// returns std::nullopt.
+static std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI) {
+  uint64_t TSFlags = MI.getDesc().TSFlags;
+  if (!RISCVII::hasRoundModeOp(TSFlags))
+    return std::nullopt;
+
+  // The operand order
+  // -------------------------------------
+  // | n-1 (if any)   | n-2  | n-3 | n-4 |
+  // | policy         | sew  | vl  | rm  |
+  // -------------------------------------
+  return MI.getNumExplicitOperands() - RISCVII::hasVecPolicyOp(TSFlags) - 3;
+}
+
 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                         SDNode *Node) const {
+  // Add FRM dependency to vector floating-point instructions with dynamic
+  // rounding mode.
+  if (auto RoundModeIdx = getRoundModeIdx(MI)) {
+    unsigned FRMImm = MI.getOperand(*RoundModeIdx).getImm();
+    if (FRMImm == RISCVFPRndMode::DYN && !MI.readsRegister(RISCV::FRM)) {
+      MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false,
+                                              /*isImp*/ true));
+    }
+  }
+
   // Add FRM dependency to any instructions with dynamic rounding mode.
   unsigned Opc = MI.getOpcode();
   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
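
As a side note, the index computation in getRoundModeIdx can be restated as a
tiny standalone C check; the helper below is hypothetical and only mirrors the
arithmetic, it is not part of the patch.

#include <assert.h>
#include <stdbool.h>

// rm sits three operands before the end (rm, vl, sew), minus one more slot
// when a trailing policy operand is present.
static unsigned round_mode_idx(unsigned num_explicit_ops, bool has_policy_op) {
  return num_explicit_ops - (has_policy_op ? 1u : 0u) - 3u;
}

int main(void) {
  // A masked pseudo (rd, merge, rs2, rs1, mask, rm, vl, sew, policy) has 9
  // explicit operands, so the rounding mode is operand 5.
  assert(round_mode_idx(9, true) == 5);
  return 0;
}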

diff  --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
index 7d3d665808b463..4b26c27bb4f8ef 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -13,6 +13,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCV.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
@@ -45,8 +46,7 @@ class RISCVInsertReadWriteCSR : public MachineFunctionPass {
   }
 
 private:
-  bool emitWriteVXRM(MachineBasicBlock &MBB);
-  std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI);
+  bool emitWriteRoundingMode(MachineBasicBlock &MBB);
 };
 
 } // end anonymous namespace
@@ -56,10 +56,9 @@ char RISCVInsertReadWriteCSR::ID = 0;
 INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
                 RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
 
-// This function returns the index to the rounding mode immediate value if any,
-// otherwise the function will return None.
-std::optional<unsigned>
-RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI) {
+// Returns the index of the rounding mode immediate operand if any; otherwise
+// returns std::nullopt.
+static std::optional<unsigned> getRoundModeIdx(const MachineInstr &MI) {
   uint64_t TSFlags = MI.getDesc().TSFlags;
   if (!RISCVII::hasRoundModeOp(TSFlags))
     return std::nullopt;
@@ -74,17 +73,42 @@ RISCVInsertReadWriteCSR::getRoundModeIdx(const MachineInstr &MI) {
 
 // This function inserts a write to vxrm when encountering an RVV fixed-point
 // instruction.
-bool RISCVInsertReadWriteCSR::emitWriteVXRM(MachineBasicBlock &MBB) {
+bool RISCVInsertReadWriteCSR::emitWriteRoundingMode(MachineBasicBlock &MBB) {
   bool Changed = false;
   for (MachineInstr &MI : MBB) {
     if (auto RoundModeIdx = getRoundModeIdx(MI)) {
-      Changed = true;
-
-      unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();
-      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
-          .addImm(VXRMImm);
-      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
-                                              /*IsImp*/ true));
+      if (RISCVII::usesVXRM(MI.getDesc().TSFlags)) {
+        unsigned VXRMImm = MI.getOperand(*RoundModeIdx).getImm();
+
+        Changed = true;
+
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+            .addImm(VXRMImm);
+        MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                                /*IsImp*/ true));
+      } else { // FRM
+        unsigned FRMImm = MI.getOperand(*RoundModeIdx).getImm();
+
+        // A DYN rounding mode is a hint to this pass not to alter frm.
+        if (FRMImm == RISCVFPRndMode::DYN)
+          continue;
+
+        Changed = true;
+
+        // Save
+        MachineRegisterInfo *MRI = &MBB.getParent()->getRegInfo();
+        Register SavedFRM = MRI->createVirtualRegister(&RISCV::GPRRegClass);
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::SwapFRMImm),
+                SavedFRM)
+            .addImm(FRMImm);
+        MI.addOperand(MachineOperand::CreateReg(RISCV::FRM, /*IsDef*/ false,
+                                                /*IsImp*/ true));
+        // Restore
+        MachineInstrBuilder MIB =
+            BuildMI(*MBB.getParent(), {}, TII->get(RISCV::WriteFRM))
+                .addReg(SavedFRM);
+        MBB.insertAfter(MI, MIB);
+      }
     }
   }
   return Changed;
@@ -101,7 +125,7 @@ bool RISCVInsertReadWriteCSR::runOnMachineFunction(MachineFunction &MF) {
   bool Changed = false;
 
   for (MachineBasicBlock &MBB : MF)
-    Changed |= emitWriteVXRM(MBB);
+    Changed |= emitWriteRoundingMode(MBB);
 
   return Changed;
 }
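
One consequence of the save/swap/restore sequence above is that the frm write
stays local to the vector instruction. A rough C-level sketch of the intent
(the function name is ours; it assumes the _rm intrinsic from this patch and
the pre-existing non-_rm vfadd):

#include <fenv.h>
#include <riscv_vector.h>

// The vector add uses RNE via its immediate; the pass-inserted swap/restore
// pair puts the caller's rounding mode back before the scalar add below.
vfloat32m1_t mixed(vfloat32m1_t a, vfloat32m1_t b, float x, float y,
                   size_t vl) {
  fesetround(FE_UPWARD);                        // scalar frm now RUP
  vfloat32m1_t s = __riscv_vfadd_vv_f32m1_rm(a, b, __RISCV_FRM_RNE, vl);
  return __riscv_vfadd_vf_f32m1(s, x + y, vl);  // x + y still rounds with RUP
}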

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index ab77d971fb9e58..445dad757dc893 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -217,6 +217,14 @@ class RVInst<dag outs, dag ins, string opcodestr, string argstr,
 
   bit HasRoundModeOp = 0;
   let TSFlags{19} =  HasRoundModeOp;
+
+  // This is only valid when HasRoundModeOp is set to 1. HasRoundModeOp is set
+  // to 1 for vector fixed-point or floating-point intrinsics. This bit is
+  // processed by the 'RISCVInsertReadWriteCSR' pass to distinguish between
+  // fixed-point and floating-point instructions and emit the appropriate
+  // read/write to the correct CSR.
+  bit UsesVXRM = 0;
+  let TSFlags{20} =  UsesVXRM;
 }
 
 // Pseudo instructions

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 9a8669b5dd4ba1..addd22d2a1ff0d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1094,7 +1094,8 @@ class VPseudoBinaryNoMaskTU<VReg RetClass,
 class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
                                       VReg Op1Class,
                                       DAGOperand Op2Class,
-                                      string Constraint> :
+                                      string Constraint,
+                                      int UsesVXRM_ = 1> :
         Pseudo<(outs RetClass:$rd),
                (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, ixlenimm:$rm,
                     AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
@@ -1106,12 +1107,14 @@ class VPseudoBinaryNoMaskRoundingMode<VReg RetClass,
   let HasSEWOp = 1;
   let HasVecPolicyOp = 1;
   let HasRoundModeOp = 1;
+  let UsesVXRM = UsesVXRM_;
 }
 
 class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
                                           RegisterClass Op1Class,
                                           DAGOperand Op2Class,
-                                          string Constraint> :
+                                          string Constraint,
+                                          int UsesVXRM_> :
         Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                 (ins GetVRegNoV0<RetClass>.R:$merge,
                      Op1Class:$rs2, Op2Class:$rs1,
@@ -1126,6 +1129,7 @@ class VPseudoBinaryMaskPolicyRoundingMode<VReg RetClass,
   let HasVecPolicyOp = 1;
   let UsesMaskPolicy = 1;
   let HasRoundModeOp = 1;
+  let UsesVXRM = UsesVXRM_;
 }
 
 // Special version of VPseudoBinaryNoMask where we pretend the first source is
@@ -1864,19 +1868,22 @@ multiclass VPseudoBinaryRoundingMode<VReg RetClass,
                                      DAGOperand Op2Class,
                                      LMULInfo MInfo,
                                      string Constraint = "",
-                                     int sew = 0> {
+                                     int sew = 0,
+                                     int UsesVXRM = 1> {
   let VLMul = MInfo.value in {
     defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
     def suffix : VPseudoBinaryNoMaskRoundingMode<RetClass, Op1Class, Op2Class,
-                                                 Constraint>;
+                                                 Constraint, UsesVXRM>;
     def suffix # "_MASK" : VPseudoBinaryMaskPolicyRoundingMode<RetClass,
                                                                Op1Class,
                                                                Op2Class,
-                                                               Constraint>,
+                                                               Constraint,
+                                                               UsesVXRM>,
                            RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
   }
 }
 
+
 multiclass VPseudoBinaryM<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
@@ -1934,6 +1941,12 @@ multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
   defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
 }
 
+multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, string Constraint = "", int sew = 0> {
+  defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
+                                       Constraint, sew,
+                                       /* UsesVXRM = */ 0>;
+}
+
 multiclass VPseudoVGTR_VV_EEW<int eew, string Constraint = ""> {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -1982,6 +1995,12 @@ multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, string Constraint = "", int
                                    f.fprclass, m, Constraint, sew>;
 }
 
+multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
+  defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
+                                               f.fprclass, m, Constraint, sew,
+                                               /* UsesVXRM = */ 0>;
+}
+
 multiclass VPseudoVSLD1_VF<string Constraint = ""> {
   foreach f = FPList in {
     foreach m = f.MxList in {
@@ -2677,6 +2696,28 @@ multiclass VPseudoVALU_VV_VF {
   }
 }
 
+multiclass VPseudoVALU_VV_VF_RM {
+  foreach m = MxListF in {
+    defvar mx = m.MX;
+    defvar WriteVFALUV_MX = !cast<SchedWrite>("WriteVFALUV_" # mx);
+    defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
+
+    defm "" : VPseudoBinaryFV_VV_RM<m>,
+              Sched<[WriteVFALUV_MX, ReadVFALUV_MX, ReadVFALUV_MX, ReadVMask]>;
+  }
+
+  foreach f = FPList in {
+    foreach m = f.MxList in {
+      defvar mx = m.MX;
+      defvar WriteVFALUF_MX = !cast<SchedWrite>("WriteVFALUF_" # mx);
+      defvar ReadVFALUV_MX = !cast<SchedRead>("ReadVFALUV_" # mx);
+      defvar ReadVFALUF_MX = !cast<SchedRead>("ReadVFALUF_" # mx);
+      defm "" : VPseudoBinaryV_VF_RM<m, f>,
+                Sched<[WriteVFALUF_MX, ReadVFALUV_MX, ReadVFALUF_MX, ReadVMask]>;
+    }
+  }
+}
+
 multiclass VPseudoVALU_VF {
   foreach f = FPList in {
     foreach m = f.MxList in {
@@ -5667,8 +5708,12 @@ let Predicates = [HasVInstructionsAnyF] in {
 //===----------------------------------------------------------------------===//
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
+let mayRaiseFPException = true,
+    hasPostISelHook = 1 in {
+defm PseudoVFADD  : VPseudoVALU_VV_VF_RM;
+}
+
 let Uses = [FRM], mayRaiseFPException = true in {
-defm PseudoVFADD  : VPseudoVALU_VV_VF;
 defm PseudoVFSUB  : VPseudoVALU_VV_VF;
 defm PseudoVFRSUB : VPseudoVALU_VF;
 }
@@ -6327,7 +6372,8 @@ defm : VPatBinaryV_WV_WX_WI_RM<"int_riscv_vnclip", "PseudoVNCLIP",
 //===----------------------------------------------------------------------===//
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
+defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD",
+                            AllFloatVectors>;
 defm : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
 defm : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
 

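One point worth keeping in mind for the test updates further down: the _RM pseudos place the rounding-mode operand between the sources and the vl/sew/policy operands, which is why every updated intrinsic call grows one extra iXLen immediately before %vl. The layout, paraphrased as a C++ struct (field names mirror the ins dag of VPseudoBinaryNoMaskRoundingMode above; the types are placeholders):

    // Operand order of the unmasked *_RM pseudos, per the ins dag above.
    struct VFADDRoundModeOperands {
      unsigned Merge;  // $merge  - passthru / tie-in register
      unsigned Rs2;    // $rs2    - first vector source
      unsigned Rs1;    // $rs1    - second vector or scalar source
      unsigned Rm;     // $rm     - static rounding mode; 7 (DYN) keeps frm
      unsigned Vl;     // $vl     - AVL
      unsigned Sew;    // $sew    - log2 of the element width
      unsigned Policy; // $policy - tail/mask policy bits
    };

    int main() {
      // An unmasked e64 vfadd with a static RNE (0) rounding mode:
      VFADDRoundModeOperands Ops{/*Merge=*/0, /*Rs2=*/8, /*Rs1=*/9,
                                 /*Rm=*/0, /*Vl=*/16, /*Sew=*/6,
                                 /*Policy=*/1};
      return Ops.Rm; // 0 == RNE
    }
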
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 06eb8de9cc700c..4bfa898af4af13 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -92,6 +92,30 @@ class VPatBinarySDNode_VV<SDPatternOperator vop,
                      op_reg_class:$rs2,
                      avl, log2sew, TA_MA)>;
 
+class VPatBinarySDNode_VV_RM<SDPatternOperator vop,
+                             string instruction_name,
+                             ValueType result_type,
+                             ValueType op_type,
+                             int log2sew,
+                             LMULInfo vlmul,
+                             OutPatFrag avl,
+                             VReg op_reg_class,
+                             bit isSEWAware = 0> :
+    Pat<(result_type (vop
+                     (op_type op_reg_class:$rs1),
+                     (op_type op_reg_class:$rs2))),
+        (!cast<Instruction>(
+                     !if(isSEWAware,
+                         instruction_name#"_VV_"# vlmul.MX#"_E"#!shl(1, log2sew),
+                         instruction_name#"_VV_"# vlmul.MX))
+                     (result_type (IMPLICIT_DEF)),
+                     op_reg_class:$rs1,
+                     op_reg_class:$rs2,
+                     // Value to indicate no rounding mode change in
+                     // RISCVInsertReadWriteCSR
+                     FRM_DYN,
+                     avl, log2sew, TA_MA)>;
+
 class VPatBinarySDNode_XI<SDPatternOperator vop,
                           string instruction_name,
                           string suffix,
@@ -167,6 +191,31 @@ class VPatBinarySDNode_VF<SDPatternOperator vop,
                      (xop_type xop_kind:$rs2),
                      avl, log2sew, TA_MA)>;
 
+class VPatBinarySDNode_VF_RM<SDPatternOperator vop,
+                             string instruction_name,
+                             ValueType result_type,
+                             ValueType vop_type,
+                             ValueType xop_type,
+                             int log2sew,
+                             LMULInfo vlmul,
+                             OutPatFrag avl,
+                             VReg vop_reg_class,
+                             DAGOperand xop_kind,
+                             bit isSEWAware = 0> :
+    Pat<(result_type (vop (vop_type vop_reg_class:$rs1),
+                          (vop_type (SplatFPOp xop_kind:$rs2)))),
+        (!cast<Instruction>(
+                     !if(isSEWAware,
+                         instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew),
+                         instruction_name#"_"#vlmul.MX))
+                     (result_type (IMPLICIT_DEF)),
+                     vop_reg_class:$rs1,
+                     (xop_type xop_kind:$rs2),
+                     // Value to indicate no rounding mode change in
+                     // RISCVInsertReadWriteCSR
+                     FRM_DYN,
+                     avl, log2sew, TA_MA)>;
+
 multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_name,
                                     bit isSEWAware = 0> {
   foreach vti = AllFloatVectors in {
@@ -182,6 +231,21 @@ multiclass VPatBinaryFPSDNode_VV_VF<SDPatternOperator vop, string instruction_na
   }
 }
 
+multiclass VPatBinaryFPSDNode_VV_VF_RM<SDPatternOperator vop, string instruction_name,
+                                       bit isSEWAware = 0> {
+  foreach vti = AllFloatVectors in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : VPatBinarySDNode_VV_RM<vop, instruction_name,
+                                   vti.Vector, vti.Vector, vti.Log2SEW,
+                                   vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
+      def : VPatBinarySDNode_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
+                                   vti.Vector, vti.Vector, vti.Scalar,
+                                   vti.Log2SEW, vti.LMul, vti.AVL, vti.RegClass,
+                                   vti.ScalarRegClass, isSEWAware>;
+    }
+  }
+}
+
 multiclass VPatBinaryFPSDNode_R_VF<SDPatternOperator vop, string instruction_name,
                                    bit isSEWAware = 0> {
   foreach fvti = AllFloatVectors in
@@ -971,7 +1035,7 @@ foreach mti = AllMasks in {
 // 13. Vector Floating-Point Instructions
 
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPSDNode_VV_VF<any_fadd, "PseudoVFADD">;
+defm : VPatBinaryFPSDNode_VV_VF_RM<any_fadd, "PseudoVFADD">;
 defm : VPatBinaryFPSDNode_VV_VF<any_fsub, "PseudoVFSUB">;
 defm : VPatBinaryFPSDNode_R_VF<any_fsub, "PseudoVFRSUB">;
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 3092ba94fbe750..6b489883c0747c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -602,6 +602,38 @@ class VPatBinaryVL_V<SDPatternOperator vop,
                    op2_reg_class:$rs2,
                    (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
+class VPatBinaryVL_V_RM<SDPatternOperator vop,
+                     string instruction_name,
+                     string suffix,
+                     ValueType result_type,
+                     ValueType op1_type,
+                     ValueType op2_type,
+                     ValueType mask_type,
+                     int log2sew,
+                     LMULInfo vlmul,
+                     VReg result_reg_class,
+                     VReg op1_reg_class,
+                     VReg op2_reg_class,
+                     bit isSEWAware = 0>
+    : Pat<(result_type (vop
+                       (op1_type op1_reg_class:$rs1),
+                       (op2_type op2_reg_class:$rs2),
+                       (result_type result_reg_class:$merge),
+                       (mask_type V0),
+                       VLOpFrag)),
+      (!cast<Instruction>(
+                   !if(isSEWAware,
+                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
+                       instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK"))
+                   result_reg_class:$merge,
+                   op1_reg_class:$rs1,
+                   op2_reg_class:$rs2,
+                   (mask_type V0),
+                   // Value to indicate no rounding mode change in
+                   // RISCVInsertReadWriteCSR
+                   FRM_DYN,
+                   GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+
 multiclass VPatTiedBinaryNoMaskVL_V<SDNode vop,
                                     string instruction_name,
                                     string suffix,
@@ -785,6 +817,36 @@ class VPatBinaryVL_VF<SDPatternOperator vop,
                    scalar_reg_class:$rs2,
                    (mask_type V0), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
+class VPatBinaryVL_VF_RM<SDPatternOperator vop,
+                      string instruction_name,
+                      ValueType result_type,
+                      ValueType vop1_type,
+                      ValueType vop2_type,
+                      ValueType mask_type,
+                      int log2sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      VReg vop_reg_class,
+                      RegisterClass scalar_reg_class,
+                      bit isSEWAware = 0>
+    : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1),
+                       (vop2_type (SplatFPOp scalar_reg_class:$rs2)),
+                       (result_type result_reg_class:$merge),
+                       (mask_type V0),
+                       VLOpFrag)),
+      (!cast<Instruction>(
+                   !if(isSEWAware,
+                       instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK",
+                       instruction_name#"_"#vlmul.MX#"_MASK"))
+                   result_reg_class:$merge,
+                   vop_reg_class:$rs1,
+                   scalar_reg_class:$rs2,
+                   (mask_type V0),
+                   // Value to indicate no rounding mode change in
+                   // RISCVInsertReadWriteCSR
+                   FRM_DYN,
+                   GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
+
 multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name,
                                 bit isSEWAware = 0> {
   foreach vti = AllFloatVectors in {
@@ -801,6 +863,22 @@ multiclass VPatBinaryFPVL_VV_VF<SDPatternOperator vop, string instruction_name,
   }
 }
 
+multiclass VPatBinaryFPVL_VV_VF_RM<SDPatternOperator vop, string instruction_name,
+                                bit isSEWAware = 0> {
+  foreach vti = AllFloatVectors in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in {
+      def : VPatBinaryVL_V_RM<vop, instruction_name, "VV",
+                             vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                             vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                             vti.RegClass, isSEWAware>;
+      def : VPatBinaryVL_VF_RM<vop, instruction_name#"_V"#vti.ScalarSuffix,
+                               vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                               vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                               vti.ScalarRegClass, isSEWAware>;
+      }
+  }
+}
+
 multiclass VPatBinaryFPVL_R_VF<SDPatternOperator vop, string instruction_name,
                                bit isSEWAware = 0> {
   foreach fvti = AllFloatVectors in {
@@ -1836,7 +1914,7 @@ defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
 // 13. Vector Floating-Point Instructions
 
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm : VPatBinaryFPVL_VV_VF<any_riscv_fadd_vl, "PseudoVFADD">;
+defm : VPatBinaryFPVL_VV_VF_RM<any_riscv_fadd_vl, "PseudoVFADD">;
 defm : VPatBinaryFPVL_VV_VF<any_riscv_fsub_vl, "PseudoVFSUB">;
 defm : VPatBinaryFPVL_R_VF<any_riscv_fsub_vl, "PseudoVFRSUB">;
 

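Both the SDNode patterns in the previous file and the VL patterns above build the target pseudo's name by string concatenation, optionally folding in the element width (_E<SEW>, with SEW = 1 << log2sew) and a _MASK suffix. The same mangling in C++ form, as a sketch (the authoritative logic is the TableGen !if/!shl above):

    #include <iostream>
    #include <string>

    // Mirrors instruction_name#"_"#MX[#"_E"#SEW][#"_MASK"] from the patterns.
    std::string pseudoName(const std::string &Base, const std::string &MX,
                           int Log2SEW, bool SEWAware, bool Masked) {
      std::string Name = Base + "_" + MX;
      if (SEWAware)
        Name += "_E" + std::to_string(1 << Log2SEW);
      if (Masked)
        Name += "_MASK";
      return Name;
    }

    int main() {
      // The masked e16/mf4 vfadd.vv selected in the tests below:
      std::cout << pseudoName("PseudoVFADD_VV", "MF4", /*Log2SEW=*/4,
                              /*SEWAware=*/false, /*Masked=*/true)
                << "\n"; // PseudoVFADD_VV_MF4_MASK
    }
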
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
index 80de207ccfd766..bf6293dbd4208a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -41,7 +41,7 @@ entry:
   %3 = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(
     <vscale x 1 x double> poison,
     <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2, i64 %vl)
+    <vscale x 1 x double> %2, i64 7, i64 %vl)
   ret <vscale x 1 x double> %3
 }
 
@@ -49,4 +49,4 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64)
+  i64, i64)

diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
index 36a2d45ffb4bd8..e5cd47ee2ea8c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-vmv.ll
@@ -106,7 +106,7 @@ define <vscale x 4 x i32> @unfoldable_load(<vscale x 4 x i32> %passthru, ptr %p,
 
 declare <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, iXLen)
 
-declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
+declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
 
 define <vscale x 4 x float> @vfadd(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl1, iXLen %vl2) {
 ; CHECK-LABEL: vfadd:
@@ -116,7 +116,7 @@ define <vscale x 4 x float> @vfadd(<vscale x 4 x float> %passthru, <vscale x 4 x
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl1)
+  %v = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen 7, iXLen %vl1)
   %w = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32(<vscale x 4 x float> %passthru, <vscale x 4 x float> %v, iXLen %vl2)
   ret <vscale x 4 x float> %w
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
index f10252b9618c82..895875518712e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmf.ll
@@ -10,7 +10,7 @@ define <2 x double> @foo(<2 x double> %x, <2 x double> %y) {
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vr = COPY $v9
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v8
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
-  ; CHECK-NEXT:   [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[DEF]], [[COPY1]], [[COPY]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $frm
+  ; CHECK-NEXT:   [[PseudoVFADD_VV_M1_:%[0-9]+]]:vr = nnan ninf nsz arcp contract afn reassoc nofpexcept PseudoVFADD_VV_M1 [[DEF]], [[COPY1]], [[COPY]], 7, 2, 6 /* e64 */, 1 /* ta, mu */
   ; CHECK-NEXT:   $v8 = COPY [[PseudoVFADD_VV_M1_]]
   ; CHECK-NEXT:   PseudoRET implicit $v8
   %1 = fadd fast <2 x double> %x, %y

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index 5ebb05eaf86f5c..5477c01e2bfa91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -617,8 +617,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -632,7 +631,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    iXLen %3, iXLen 3)
+    iXLen 7, iXLen %3, iXLen 3)
 
   ret <vscale x 1 x half> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index c8ea5e7d06b5ca..ee029926e73e2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -587,8 +587,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen)
+  iXLen, iXLen, iXLen);
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
@@ -601,7 +600,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 7, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index 87b25ddd8e57db..88fc66c4adc17a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -587,8 +587,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen)
+  iXLen, iXLen, iXLen);
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
@@ -601,7 +600,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 2)
+    iXLen 7, iXLen %4, iXLen 2)
 
   ret <vscale x 1 x half> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index 1c1910474b7edd..d0375fafc89a97 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -587,8 +587,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen)
+  iXLen, iXLen, iXLen);
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
@@ -601,7 +600,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 0)
+    iXLen 7, iXLen %4, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index 15abf865be7e5c..2322256af5f8f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -83,11 +83,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O2-NEXT:    addi sp, sp, 32
 ; SPILL-O2-NEXT:    ret
 {
-   %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
+   %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 7, i32 %gvl)
    %call = call signext i32 @puts(ptr @.str)
-   %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i32 %gvl)
+   %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i32 7, i32 %gvl)
    ret <vscale x 1 x double> %z
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 %gvl)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i32, i32 %gvl)
 declare i32 @puts(ptr);

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 0067735b492c7d..6ef94aaa678001 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -114,11 +114,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
 ; SPILL-O2-VLEN128-NEXT:    addi sp, sp, 32
 ; SPILL-O2-VLEN128-NEXT:    ret
 {
-   %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
+   %x = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %gvl)
    %call = call signext i32 @puts(ptr @.str)
-   %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i64 %gvl)
+   %z = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %x, i64 7, i64 %gvl)
    ret <vscale x 1 x double> %z
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %gvl)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64, i64 %gvl)
 declare i32 @puts(ptr);

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
index e75a2925432476..d079230b336352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll
@@ -38,14 +38,14 @@ define <vscale x 2 x float> @vpmerge_vfadd(<vscale x 2 x float> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl, i64 1)
+  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 7, i64 %vl, i64 1)
   %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
   %mask = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
   %b = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32(<vscale x 2 x float> %passthru, <vscale x 2 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x i1> %mask, i64 %vl)
 
   ret <vscale x 2 x float> %b
 }
-declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i64, i64, i64)
 
 define <vscale x 2 x float> @vpmerge_vfsub(<vscale x 2 x float> %passthru, <vscale x 2 x float> %x, <vscale x 2 x float> %y, <vscale x 2 x i1> %m, i64 %vl) {
 ; CHECK-LABEL: vpmerge_vfsub:

diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index 42ad8fdeaa29f9..a2824d033bc551 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -284,7 +284,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -297,7 +297,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
-    iXLen %3)
+    iXLen 7, iXLen %3)
 
   ret <vscale x 1 x half> %a
 }

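Unlike the tests above, which pass frm = 7 (DYN) and therefore leave frm alone, vfadd.ll deliberately passes frm = 0 (RNE), so codegen must bracket each vfadd with an fsrmi/fsrm pair: fsrmi installs the immediate while saving the old frm into a register, and fsrm writes the saved value back. A small software model of that pair (illustration only, not the CSR itself):

    #include <cassert>
    #include <cstdint>

    static uint8_t frm = 4; // pretend the caller runs under RMM (4)

    // fsrmi rd, imm: rd = old frm, frm = imm (3-bit field)
    uint8_t fsrmi(uint8_t Imm) {
      uint8_t Old = frm;
      frm = Imm & 0x7;
      return Old;
    }

    // fsrm rs: frm = rs (the restore path in the generated code)
    void fsrm(uint8_t Val) { frm = Val & 0x7; }

    int main() {
      uint8_t Saved = fsrmi(0); // CHECK: fsrmi a0, 0  -- force RNE
      assert(frm == 0);         // the vfadd.vv executes under RNE here
      fsrm(Saved);              // CHECK: fsrm a0      -- restore caller's frm
      assert(frm == 4);
    }
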
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
index ebfa04df7c953a..63b929557b7e78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -11,20 +11,22 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
     <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -34,14 +36,15 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
@@ -49,7 +52,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -58,20 +61,22 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
     <vscale x 2 x half> undef,
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -81,14 +86,15 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
@@ -96,7 +102,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -105,20 +111,22 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
     <vscale x 4 x half> undef,
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -128,14 +136,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
@@ -143,7 +152,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -152,20 +161,22 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
     <vscale x 8 x half> undef,
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -175,14 +186,15 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
@@ -190,7 +202,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -199,20 +211,22 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
     <vscale x 16 x half> undef,
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -222,14 +236,15 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
@@ -237,7 +252,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -246,20 +261,22 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x half>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
     <vscale x 32 x half> undef,
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -269,15 +286,16 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
@@ -285,7 +303,7 @@ entry:
     <vscale x 32 x half> %1,
     <vscale x 32 x half> %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -294,20 +312,22 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
     <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -317,14 +337,15 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
@@ -332,7 +353,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -341,20 +362,22 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
     <vscale x 2 x float> undef,
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -364,14 +387,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
@@ -379,7 +403,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -388,20 +412,22 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
     <vscale x 4 x float> undef,
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -411,14 +437,15 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
@@ -426,7 +453,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -435,20 +462,22 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
     <vscale x 8 x float> undef,
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -458,14 +487,15 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
@@ -473,7 +503,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -482,20 +512,22 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x float>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
     <vscale x 16 x float> undef,
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -505,15 +537,16 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
@@ -521,7 +554,7 @@ entry:
     <vscale x 16 x float> %1,
     <vscale x 16 x float> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -530,20 +563,22 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -553,14 +588,15 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
@@ -568,7 +604,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -577,20 +613,22 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
     <vscale x 2 x double> undef,
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -600,14 +638,15 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
@@ -615,7 +654,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -624,20 +663,22 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
     <vscale x 4 x double> undef,
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -647,14 +688,15 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
@@ -662,7 +704,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -671,20 +713,22 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x double>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
     <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -694,15 +738,16 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
@@ -710,7 +755,7 @@ entry:
     <vscale x 8 x double> %1,
     <vscale x 8 x double> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -719,20 +764,22 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
     <vscale x 1 x half> undef,
     <vscale x 1 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -742,14 +789,15 @@ declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
@@ -757,7 +805,7 @@ entry:
     <vscale x 1 x half> %1,
     half %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -766,20 +814,22 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
     <vscale x 2 x half> undef,
     <vscale x 2 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -789,14 +839,15 @@ declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
@@ -804,7 +855,7 @@ entry:
     <vscale x 2 x half> %1,
     half %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -813,20 +864,22 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
     <vscale x 4 x half> undef,
     <vscale x 4 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -836,14 +889,15 @@ declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
@@ -851,7 +905,7 @@ entry:
     <vscale x 4 x half> %1,
     half %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -860,20 +914,22 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
     <vscale x 8 x half> undef,
     <vscale x 8 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -883,14 +939,15 @@ declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
@@ -898,7 +955,7 @@ entry:
     <vscale x 8 x half> %1,
     half %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -907,20 +964,22 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
     <vscale x 16 x half> undef,
     <vscale x 16 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -930,14 +989,15 @@ declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
@@ -945,7 +1005,7 @@ entry:
     <vscale x 16 x half> %1,
     half %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -954,20 +1014,22 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   half,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
     <vscale x 32 x half> undef,
     <vscale x 32 x half> %0,
     half %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -977,14 +1039,15 @@ declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
   <vscale x 32 x half>,
   half,
   <vscale x 32 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
@@ -992,7 +1055,7 @@ entry:
     <vscale x 32 x half> %1,
     half %2,
     <vscale x 32 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1001,20 +1064,22 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
     <vscale x 1 x float> undef,
     <vscale x 1 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -1024,14 +1089,15 @@ declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
@@ -1039,7 +1105,7 @@ entry:
     <vscale x 1 x float> %1,
     float %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1048,20 +1114,22 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
     <vscale x 2 x float> undef,
     <vscale x 2 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1071,14 +1139,15 @@ declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
@@ -1086,7 +1155,7 @@ entry:
     <vscale x 2 x float> %1,
     float %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1095,20 +1164,22 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
     <vscale x 4 x float> undef,
     <vscale x 4 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1118,14 +1189,15 @@ declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
@@ -1133,7 +1205,7 @@ entry:
     <vscale x 4 x float> %1,
     float %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1142,20 +1214,22 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
     <vscale x 8 x float> undef,
     <vscale x 8 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1165,14 +1239,15 @@ declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
@@ -1180,7 +1255,7 @@ entry:
     <vscale x 8 x float> %1,
     float %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1189,20 +1264,22 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   float,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
     <vscale x 16 x float> undef,
     <vscale x 16 x float> %0,
     float %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1212,14 +1289,15 @@ declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
   <vscale x 16 x float>,
   float,
   <vscale x 16 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
@@ -1227,7 +1305,7 @@ entry:
     <vscale x 16 x float> %1,
     float %2,
     <vscale x 16 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1236,20 +1314,22 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1259,14 +1339,15 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   double,
   <vscale x 1 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
@@ -1274,7 +1355,7 @@ entry:
     <vscale x 1 x double> %1,
     double %2,
     <vscale x 1 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1283,20 +1364,22 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
     <vscale x 2 x double> undef,
     <vscale x 2 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1306,14 +1389,15 @@ declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
   <vscale x 2 x double>,
   double,
   <vscale x 2 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
@@ -1321,7 +1405,7 @@ entry:
     <vscale x 2 x double> %1,
     double %2,
     <vscale x 2 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1330,20 +1414,22 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
     <vscale x 4 x double> undef,
     <vscale x 4 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1353,14 +1439,15 @@ declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
   <vscale x 4 x double>,
   double,
   <vscale x 4 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
@@ -1368,7 +1455,7 @@ entry:
     <vscale x 4 x double> %1,
     double %2,
     <vscale x 4 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1377,20 +1464,22 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   double,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
     <vscale x 8 x double> undef,
     <vscale x 8 x double> %0,
     double %1,
-    iXLen %2)
+    iXLen 0, iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1400,14 +1489,15 @@ declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
   <vscale x 8 x double>,
   double,
   <vscale x 8 x i1>,
-  iXLen,
-  iXLen);
+  iXLen, iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
@@ -1415,7 +1505,7 @@ entry:
     <vscale x 8 x double> %1,
     double %2,
     <vscale x 8 x i1> %3,
-    iXLen %4, iXLen 1)
+    iXLen 0, iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
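
The hunks above all follow one pattern: each `llvm.riscv.vfadd` declaration gains an extra `iXLen` rounding-mode operand placed just before the `vl` operand, the call sites pass `0` (round to nearest, ties to even), and the checked assembly now brackets each `vfadd` with an `fsrmi`/`fsrm` pair that writes and then restores `frm`. As a minimal sketch, not taken from the patch and assuming RV64 so that `iXLen` is `i64`:

; Hypothetical illustration of the updated unmasked signature:
; passthru, op1, op2, frm, vl. frm values 0-4 request a static rounding
; mode (reusing the scalar FP encoding); 7 means "use whatever frm holds".
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i64, i64)

define <vscale x 1 x float> @static_rne(<vscale x 1 x float> %x, <vscale x 1 x float> %y, i64 %vl) {
  ; frm = 0 (rne): codegen surrounds the vfadd with fsrmi/fsrm,
  ; as in the CHECK lines above.
  %r = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %x,
    <vscale x 1 x float> %y,
    i64 0, i64 %vl)
  ret <vscale x 1 x float> %r
}

define <vscale x 1 x float> @dynamic_frm(<vscale x 1 x float> %x, <vscale x 1 x float> %y, i64 %vl) {
  ; frm = 7 (dynamic): no fsrmi/fsrm is emitted.
  %r = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %x,
    <vscale x 1 x float> %y,
    i64 7, i64 %vl)
  ret <vscale x 1 x float> %r
}

The dynamic value is why the vsetvli tests below pass `i64 7` at every updated call site: the generated code is unchanged, so their CHECK lines did not need to be regenerated.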

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 2668774528bcac..6294407b0e2caa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -7,8 +7,8 @@
 
 declare i64 @llvm.riscv.vsetvli(i64, i64, i64)
 
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
-declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64, i64)
 
 declare <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
 
@@ -37,7 +37,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -70,7 +70,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -105,7 +105,7 @@ entry:
 
 if.then:                                          ; preds = %entry
   %0 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -158,7 +158,7 @@ entry:
 if.then:                                          ; preds = %entry
   %0 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %avl)
   %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %avl)
-  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %avl)
+  %2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 7, i64 %avl)
   %3 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %2, <vscale x 1 x double>* %3, i64 %avl)
   br label %if.end
@@ -166,7 +166,7 @@ if.then:                                          ; preds = %entry
 if.else:                                          ; preds = %entry
   %4 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %avl)
   %5 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 2.000000e+00, i64 %avl)
-  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 %avl)
+  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %4, <vscale x 2 x float> %5, i64 7, i64 %avl)
   %7 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %6, <vscale x 2 x float>* %7, i64 %avl)
   br label %if.end
@@ -204,7 +204,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -282,7 +282,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -299,7 +299,7 @@ if.then4:                                         ; preds = %if.end
   %3 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 3, i64 0)
   %4 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 1.000000e+00, i64 %3)
   %5 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(<vscale x 1 x double> undef, double 2.000000e+00, i64 %3)
-  %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 %3)
+  %6 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %4, <vscale x 1 x double> %5, i64 7, i64 %3)
   %7 = bitcast i8* @scratch to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64(<vscale x 1 x double> %6, <vscale x 1 x double>* %7, i64 %3)
   br label %if.end10
@@ -308,7 +308,7 @@ if.else5:                                         ; preds = %if.end
   %8 = tail call i64 @llvm.riscv.vsetvli(i64 %avl, i64 2, i64 0)
   %9 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(<vscale x 2 x float> undef, float 1.000000e+00, i64 %8)
   %10 = tail call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32( <vscale x 2 x float> undef, float 2.000000e+00, i64 %8)
-  %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 %8)
+  %11 = tail call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> %9, <vscale x 2 x float> %10, i64 7, i64 %8)
   %12 = bitcast i8* @scratch to <vscale x 2 x float>*
   tail call void @llvm.riscv.vse.nxv2f32(<vscale x 2 x float> %11, <vscale x 2 x float>* %12, i64 %8)
   br label %if.end10
@@ -366,7 +366,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   br label %if.end
 
 if.else:                                          ; preds = %entry
@@ -426,7 +426,7 @@ entry:
   br i1 %tobool, label %if.else, label %if.then
 
 if.then:                                          ; preds = %entry
-  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 %0)
+  %1 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> %a, <vscale x 1 x double> %b, i64 7, i64 %0)
   call void @foo()
   br label %if.end
 
@@ -616,7 +616,7 @@ for.body:                                         ; preds = %entry, %for.body
   %arrayidx1 = getelementptr inbounds double, double* %b, i64 %i.014
   %3 = bitcast double* %arrayidx1 to <vscale x 1 x double>*
   %4 = tail call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double>* %3, i64 %0)
-  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> %2, <vscale x 1 x double> %4, i64 %0)
+  %5 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> %2, <vscale x 1 x double> %4, i64 7, i64 %0)
   %arrayidx2 = getelementptr inbounds double, double* %c, i64 %i.014
   %6 = bitcast double* %arrayidx2 to <vscale x 1 x double>*
   tail call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> %5, <vscale x 1 x double>* %6, i64 %0)
@@ -981,7 +981,7 @@ exit:
 
 declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
 declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
-declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64)
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64, i64)
 declare void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double>, <vscale x 1 x double>* nocapture, i64)
 declare <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 9c2582299a2908..f4bde8438289ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -8,7 +8,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
-  i64)
+  i64, i64)
 declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
@@ -27,7 +27,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %0)
+    i64 7, i64 %0)
   ret <vscale x 1 x double> %1
 }
 
@@ -43,7 +43,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %avl)
+    i64 7, i64 %avl)
   ret <vscale x 1 x double> %1
 }
 
@@ -232,6 +232,7 @@ entry:
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %a,
     <vscale x 1 x i1> %mask,
+    i64 7,
     i64 9,
     i64 0)
   %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
@@ -250,7 +251,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 -1)
+    i64 7, i64 -1)
   ret <vscale x 1 x double> %0
 }
 
@@ -269,12 +270,12 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 1)
+    i64 7, i64 1)
   %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %f1,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
   ret <vscale x 1 x double> %f2
 }
 
@@ -291,12 +292,12 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %avl)
+    i64 7, i64 %avl)
   %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %f1,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
   ret <vscale x 1 x double> %f2
 }
 
@@ -321,7 +322,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %splat,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
   ret <vscale x 1 x double> %f2
 }
 
@@ -342,7 +343,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
-    i64 %vsetvli)
+    i64 7, i64 %vsetvli)
   %c2 = extractelement <vscale x 1 x double> %f2, i32 0
   %c3 = fadd double %c1, %c2
   ret double %c3
@@ -370,7 +371,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %a,
-    i64 %x)
+    i64 7, i64 %x)
   %y2 = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
     <vscale x 1 x double> %f2, double %b, i64 1)
   %res = fadd <vscale x 1 x double> %y, %y2
@@ -574,7 +575,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %c,
-    i64 %0)
+    i64 7, i64 %0)
   ret <vscale x 1 x double> %2
 }
 
@@ -615,6 +616,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
   i64,
+  i64,
   i64);
 
 declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(
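
For the masked variant, the hunks in this file place the new rounding-mode operand between the mask and the existing `vl` and policy operands, as in the `i64 7, i64 9, i64 0` call site above and the three trailing `i64` parameters of the updated declaration. A minimal sketch of the resulting operand order, again hypothetical and assuming RV64:

; passthru, op1, op2, mask, frm, vl, policy
declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i64, i64, i64)

define <vscale x 1 x double> @masked_dynamic(<vscale x 1 x double> %pt, <vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i64 %vl) {
  ; frm = 7 (dynamic), policy = 1 (tail agnostic): no frm write is needed.
  %r = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %pt,
    <vscale x 1 x double> %x,
    <vscale x 1 x double> %y,
    <vscale x 1 x i1> %m,
    i64 7, i64 %vl, i64 1)
  ret <vscale x 1 x double> %r
}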


        

