[clang] 74f38df - [RISCV] Support Xsfvfnrclipxfqf extensions (#68297)

via cfe-commits cfe-commits at lists.llvm.org
Thu Nov 2 19:52:44 PDT 2023


Author: Brandon Wu
Date: 2023-11-03T10:52:37+08:00
New Revision: 74f38df1d1361f77fab56f6e647ebf37972faf7c

URL: https://github.com/llvm/llvm-project/commit/74f38df1d1361f77fab56f6e647ebf37972faf7c
DIFF: https://github.com/llvm/llvm-project/commit/74f38df1d1361f77fab56f6e647ebf37972faf7c.diff

LOG: [RISCV] Support Xsfvfnrclipxfqf extensions (#68297)

Add support for the SiFive Xsfvfnrclipxfqf extension, which provides the FP32-to-int8 ranged clip instructions specified at:

https://sifive.cdn.prismic.io/sifive/0aacff47-f530-43dc-8446-5caa2260ece0_xsfvfnrclipxfqf-spec.pdf
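
As a usage sketch (mirroring the autogenerated tests added below; the i8mf8
type and the rounding-mode value 2, i.e. RDN, are illustrative choices, not
the only supported ones):

#include <sifive_vector.h>

// Unmasked form; the rounding mode is left dynamic (emitted as frm = 7).
vint8mf8_t clip(vfloat32mf2_t vs2, float rs1, size_t vl) {
  return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
}

// Masked form with an explicit rounding mode (_rm_m suffix); 2 = RDN.
vint8mf8_t clip_rm_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);
}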

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
    llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
    llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
    llvm/test/MC/RISCV/rvv/xsfvfnrclip.s

Modified: 
    clang/include/clang/Basic/riscv_sifive_vector.td
    clang/include/clang/Basic/riscv_vector_common.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Sema/SemaRISCVVectorLookup.cpp
    clang/lib/Support/RISCVVIntrinsicUtils.cpp
    clang/test/Preprocessor/riscv-target-features.c
    clang/test/Sema/rvv-required-features-invalid.c
    clang/test/Sema/rvv-required-features.c
    clang/utils/TableGen/RISCVVEmitter.cpp
    llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
    llvm/lib/Support/RISCVISAInfo.cpp
    llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
    llvm/lib/Target/RISCV/RISCVFeatures.td
    llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
    llvm/unittests/Support/RISCVISAInfoTest.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index 7b98ac8906667b6..1e081c734d4941b 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -120,6 +120,14 @@ multiclass RVVVQMACCBuiltinSet<list<list<string>> suffixes_prototypes> {
     defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "i", suffixes_prototypes>;
 }
 
+multiclass RVVVFNRCLIPBuiltinSet<string suffix, string prototype, string type_range> {
+  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+      Name = NAME,
+      IRName = NAME,
+      MaskedIRName = NAME # "_mask" in
+  def : RVVConvBuiltin<suffix, prototype, type_range, NAME>;
+}
+
 let UnMaskedPolicyScheme = HasPolicyOperand in
   let RequiredFeatures = ["Xsfvqmaccdod"] in {
     defm sf_vqmaccu_2x8x2 : RVVVQMACCBuiltinSet<[["", "v", "vv(FixedSEW:8)SUv(FixedSEW:8)Uv"]]>;
@@ -139,3 +147,57 @@ let UnMaskedPolicyScheme = HasPolicyOperand in
 let UnMaskedPolicyScheme = HasPolicyOperand in
   let RequiredFeatures = ["Xsfvfwmaccqqq"] in
     defm sf_vfwmacc_4x4x4 : RVVVFWMACCBuiltinSet<[["", "w", "wwSvv"]]>;
+
+let UnMaskedPolicyScheme = HasPassthruOperand, RequiredFeatures = ["Xsfvfnrclipxfqf"] in {
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, vector_in, scalar_in, frm, vl)
+    // Masked:   (passthru, vector_in, scalar_in, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Operands.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  let HasFRMRoundModeOp = true in {
+    defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqfu", "c">;
+    defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqfu", "c">;
+  }
+  defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqf", "c">;
+  defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqf", "c">;
+}
+}
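
To make the operand layouts above concrete, this is the call the codegen
emits for the unmasked, tail-agnostic case (taken from the autogenerated
CHECK lines in the tests below; poison stands in for the passthru because no
maskedoff operand is present):

// C:  __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl)
// IR: call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(
//       <vscale x 1 x i8> poison,   ; passthru (tail-agnostic)
//       <vscale x 1 x float> %vs2,  ; vector_in
//       float %rs1,                 ; scalar_in
//       i64 7,                      ; frm (7 = dynamic; _rm builtins pass it)
//       i64 %vl)                    ; vl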

diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 141fac9d68e6d54..326c3883f0a8409 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -66,6 +66,7 @@
 //   t: ptrdiff_t, ignores "t"
 //   u: unsigned long, ignores "t"
 //   l: long, ignores "t"
+//   f: float32, ignores "t"
 //
 // So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
 // will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
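
Reading the new builtins' prototype strings against this table (my decoding,
taking 'q' as the quad-width vector primitive and 'F' as the floating-point
modifier):

//  "vFqf" with type_range "c" (SEW=8), at Log2LMUL=-3 for example:
//    'v'  -> result:    vint8mf8_t    (vector of the base type)
//    'Fq' -> operand 0: vfloat32mf2_t (quad-SEW-width vector, made float)
//    'f'  -> operand 1: float         (the float32 scalar added here)
//  with vl appended implicitly, giving:
//    vint8mf8_t __riscv_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t, float, size_t);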

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 3c7064c31686333..7904658576e5d50 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -85,6 +85,7 @@ enum class BaseTypeModifier : uint8_t {
   Ptrdiff,
   UnsignedLong,
   SignedLong,
+  Float32
 };
 
 // Modifier for type, used for both scalar and vector types.
@@ -485,18 +486,19 @@ enum RVVRequire : uint16_t {
   RVV_REQ_RV64 = 1 << 0,
   RVV_REQ_ZvfhminOrZvfh = 1 << 1,
   RVV_REQ_Xsfvcp = 1 << 2,
-  RVV_REQ_Xsfvfwmaccqqq = 1 << 3,
-  RVV_REQ_Xsfvqmaccdod = 1 << 4,
-  RVV_REQ_Xsfvqmaccqoq = 1 << 5,
-  RVV_REQ_Zvbb = 1 << 6,
-  RVV_REQ_Zvbc = 1 << 7,
-  RVV_REQ_Zvkb = 1 << 8,
-  RVV_REQ_Zvkg = 1 << 9,
-  RVV_REQ_Zvkned = 1 << 10,
-  RVV_REQ_Zvknha = 1 << 11,
-  RVV_REQ_Zvknhb = 1 << 12,
-  RVV_REQ_Zvksed = 1 << 13,
-  RVV_REQ_Zvksh = 1 << 14,
+  RVV_REQ_Xsfvfnrclipxfqf = 1 << 3,
+  RVV_REQ_Xsfvfwmaccqqq = 1 << 4,
+  RVV_REQ_Xsfvqmaccdod = 1 << 5,
+  RVV_REQ_Xsfvqmaccqoq = 1 << 6,
+  RVV_REQ_Zvbb = 1 << 7,
+  RVV_REQ_Zvbc = 1 << 8,
+  RVV_REQ_Zvkb = 1 << 9,
+  RVV_REQ_Zvkg = 1 << 10,
+  RVV_REQ_Zvkned = 1 << 11,
+  RVV_REQ_Zvknha = 1 << 12,
+  RVV_REQ_Zvknhb = 1 << 13,
+  RVV_REQ_Zvksed = 1 << 14,
+  RVV_REQ_Zvksh = 1 << 15,
 
   LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Zvksh)
 };

diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index e32b12b0e9e816a..8e72eba1ac4c56f 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -205,6 +205,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
   static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
       {"64bit", RVV_REQ_RV64},
       {"xsfvcp", RVV_REQ_Xsfvcp},
+      {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
       {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
       {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
       {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 597ee194fc8d4b1..751d0aedacc9a1f 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -429,6 +429,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
   case 'l':
     PT = BaseTypeModifier::SignedLong;
     break;
+  case 'f':
+    PT = BaseTypeModifier::Float32;
+    break;
   default:
     llvm_unreachable("Illegal primitive type transformers!");
   }
@@ -665,6 +668,10 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
   case BaseTypeModifier::SignedLong:
     ScalarType = ScalarTypeKind::SignedLong;
     break;
+  case BaseTypeModifier::Float32:
+    ElementBitwidth = 32;
+    ScalarType = ScalarTypeKind::Float;
+    break;
   case BaseTypeModifier::Invalid:
     ScalarType = ScalarTypeKind::Invalid;
     return;
@@ -1149,7 +1156,7 @@ void RVVIntrinsic::updateNamesAndPolicy(
 
 SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
   SmallVector<PrototypeDescriptor> PrototypeDescriptors;
-  const StringRef Primaries("evwqom0ztul");
+  const StringRef Primaries("evwqom0ztulf");
   while (!Prototypes.empty()) {
     size_t Idx = 0;
     // Skip over complex prototype because it could contain primitive type

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..9b6e2f60f3fc9e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_m(mask, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..5e4a52ca5f87238
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_m(mask, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..266356e07f0ba08
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_m(mask, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..45b6778eea8b84d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_m(mask, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..849dcbb6a56f996
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..9b3fa1b88c1b87e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..4ab3311d453c23e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..be4e954893fe2df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+

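For readers following along, the overloaded __riscv_sf_vfnrclip_xu_f_qf calls exercised
above can be used directly in application code. Below is a minimal strip-mining sketch
(not part of this patch) that narrows an FP32 buffer to unsigned 8-bit with the static
rounding mode 2 (RDN), exactly as in the tests above. It assumes the standard RVV
strip-mining intrinsics from <riscv_vector.h>; the function name clip_f32_to_u8 and the
scale parameter (which plays the role of rs1) are hypothetical, chosen for illustration.

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>
#include <sifive_vector.h>

/* Hypothetical helper: narrow n floats to uint8 in LMUL=1 strips.
   FP32 m1 inputs produce u8 mf4 results, matching the test signatures above. */
void clip_f32_to_u8(const float *src, uint8_t *dst, float scale, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);              /* elements this strip */
    vfloat32m1_t v = __riscv_vle32_v_f32m1(src + i, vl);  /* load FP32 strip */
    /* FP32 -> u8 ranged clip with static rounding mode 2 (RDN),
       as in the _rm tests above. */
    vuint8mf4_t r = __riscv_sf_vfnrclip_xu_f_qf(v, scale, 2, vl);
    __riscv_vse8_v_u8mf4(dst + i, r, vl);                 /* store u8 strip */
    i += vl;
  }
}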
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..e4bffdeaf13b5d5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..c034a0fb31a3068
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..808c5b73da40ab2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..8c9240207f19013
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..1e7a352646a4e82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..d6b1962618cfa4f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..890c3cfb2698488
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..573b98b2d5ee75c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
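
The _rm tests above exercise the overloads that take the rounding mode as an explicit immediate instead of inheriting it from the frm CSR: the extra argument (2 in every call here, round-down in the standard frm encoding) is forwarded unchanged into the IR call, where the implicit-rounding forms emit 7 (DYN). A short sketch contrasting the two overloads, using the same operand shapes as the tests:

#include <sifive_vector.h>

vuint8mf8_t demo_frm(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1,
                     size_t vl) {
  // No frm argument: lowers with rounding-mode operand 7 (DYN).
  vuint8mf8_t dyn = __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
  // Explicit frm argument: immediate 2 selects round-down (RDN).
  return __riscv_sf_vfnrclip_xu_f_qf_tu(dyn, vs2, rs1, 2, vl);
}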

diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 7af9fffefe17623..9f86427e25f53f4 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -33,6 +33,7 @@
 // CHECK-NOT: __riscv_xcvsimd {{.*$}}
 // CHECK-NOT: __riscv_xsfcie {{.*$}}
 // CHECK-NOT: __riscv_xsfvcp {{.*$}}
+// CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}}
 // CHECK-NOT: __riscv_xsfvfwmaccqqq {{.*$}}
 // CHECK-NOT: __riscv_xsfqmaccdod {{.*$}}
 // CHECK-NOT: __riscv_xsfvqmaccqoq {{.*$}}
@@ -326,6 +327,14 @@
 // RUN: -o - | FileCheck --check-prefix=CHECK-XSFVCP-EXT %s
 // CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}}
 
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfnrclipxfqf -x c -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfnrclipxfqf -x c -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
+// CHECK-XSFVFNRCLIPXFQF-EXT: __riscv_xsfvfnrclipxfqf 1000000{{$}}
+
 // RUN: %clang --target=riscv32-unknown-linux-gnu \
 // RUN: -march=rv32ixsfvfwmaccqqq -x c -E -dM %s \
 // RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFWMACCQQQ-EXT %s
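
With __riscv_xsfvfnrclipxfqf now defined (1000000 encodes version 1.0 in the usual major * 1000000 + minor * 1000 scheme), user code can feature-gate on the macro. A minimal sketch; the wrapper name is made up for illustration:

#if defined(__riscv_xsfvfnrclipxfqf)
#include <sifive_vector.h>
// Hypothetical wrapper: FP32-to-u8 ranged clip when the extension is present.
static inline vuint8mf4_t clip_u8(vfloat32m1_t v, float scale, size_t vl) {
  return __riscv_sf_vfnrclip_xu_f_qf(v, scale, vl);
}
#endif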

diff --git a/clang/test/Sema/rvv-required-features-invalid.c b/clang/test/Sema/rvv-required-features-invalid.c
index 7021b32cdf17a3f..933348eae55075e 100644
--- a/clang/test/Sema/rvv-required-features-invalid.c
+++ b/clang/test/Sema/rvv-required-features-invalid.c
@@ -27,3 +27,11 @@ void test_xsfvqmaccqoq() {
 void test_xsfvfwmaccqqq() {
   __riscv_sf_vfwmacc_4x4x4(); // expected-error {{call to undeclared function '__riscv_sf_vfwmacc_4x4x4'}}
 }
+
+void test_xsfvfnrclipxfqf() {
+  __riscv_sf_vfnrclip_x_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_x_f_qf'}}
+}
+
+void test_xsfvfnrclipxufqf() {
+  __riscv_sf_vfnrclip_xu_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_xu_f_qf'}}
+}

diff --git a/clang/test/Sema/rvv-required-features.c b/clang/test/Sema/rvv-required-features.c
index 2f6d3c109119064..ab1582bdb775c51 100644
--- a/clang/test/Sema/rvv-required-features.c
+++ b/clang/test/Sema/rvv-required-features.c
@@ -1,7 +1,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp \
 // RUN:     -target-feature +xsfvqmaccdod -target-feature +xsfvqmaccqoq \
-// RUN:     -target-feature +zvfh -target-feature +xsfvfwmaccqqq %s -fsyntax-only -verify
+// RUN:     -target-feature +zvfh -target-feature +xsfvfwmaccqqq \
+// RUN:     -target-feature +xsfvfnrclipxfqf %s -fsyntax-only -verify
 
 // expected-no-diagnostics
 
@@ -31,3 +32,11 @@ void test_xsfvqmaccqoq(vint32m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
 void test_xsfvfwmaccqqq(vfloat32m1_t vd, vfloat16m1_t vs1, vfloat16mf2_t vs2, size_t vl) {
   __riscv_sf_vfwmacc_4x4x4(vd, vs1, vs2, vl);
 }
+
+void test_xsfvfnrclipxufqf(vfloat32m1_t vs1, float rs2, size_t vl) {
+  __riscv_sf_vfnrclip_xu_f_qf(vs1, rs2, vl);
+}
+
+void test_xsfvfnrclipxfqf(vfloat32m1_t vs1, float rs2, size_t vl) {
+  __riscv_sf_vfnrclip_x_f_qf(vs1, rs2, vl);
+}

diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index ed4cdef0fddfcae..64eaf91d99a43ba 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -656,6 +656,7 @@ void RVVEmitter::createRVVIntrinsics(
                                   .Case("RV64", RVV_REQ_RV64)
                                   .Case("ZvfhminOrZvfh", RVV_REQ_ZvfhminOrZvfh)
                                   .Case("Xsfvcp", RVV_REQ_Xsfvcp)
+                                  .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
                                   .Case("Xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq)
                                   .Case("Xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod)
                                   .Case("Xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq)

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index b7548abd0550bae..f55669bded693f9 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -136,6 +136,30 @@ let TargetPrefix = "riscv" in {
     let VLOperand = 3;
   }
 
+  // Input: (passthru, vector_in, scalar_in, frm, vl)
+  class RISCVSFCustomVFNRCLIPUnMasked
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
+                     llvm_anyint_ty, LLVMMatchType<2>],
+                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
+
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+  class RISCVSFCustomVFNRCLIPMasked
+       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                   [LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<2>, LLVMMatchType<2>],
+                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 5;
+  }
+
+  multiclass RISCVSFCustomVFNRCLIP {
+    def NAME : RISCVSFCustomVFNRCLIPUnMasked;
+    def NAME # "_mask" : RISCVSFCustomVFNRCLIPMasked;
+  }
+
   defm "" : RISCVSFCustomVC_X<["x", "i"]>;
   defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
   defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
@@ -155,4 +179,8 @@ let TargetPrefix = "riscv" in {
 
   // XSfvfwmaccqqq
   def int_riscv_sf_vfwmacc_4x4x4 : RISCVSFCustomVMACC;
+
+  // XSfvfnrclipxfqf
+  defm int_riscv_sf_vfnrclip_x_f_qf : RISCVSFCustomVFNRCLIP;
+  defm int_riscv_sf_vfnrclip_xu_f_qf : RISCVSFCustomVFNRCLIP;
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index 9b0a0c8fe1453b7..158ad6fe1d9ca5d 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -73,6 +73,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
     {"xcvsimd", RISCVExtensionVersion{1, 0}},
     {"xsfcie", RISCVExtensionVersion{1, 0}},
     {"xsfvcp", RISCVExtensionVersion{1, 0}},
+    {"xsfvfnrclipxfqf", RISCVExtensionVersion{1, 0}},
     {"xsfvfwmaccqqq", RISCVExtensionVersion{1, 0}},
     {"xsfvqmaccdod", RISCVExtensionVersion{1, 0}},
     {"xsfvqmaccqoq", RISCVExtensionVersion{1, 0}},
@@ -994,6 +995,7 @@ static const char *ImpliedExtsF[] = {"zicsr"};
 static const char *ImpliedExtsV[] = {"zvl128b", "zve64d"};
 static const char *ImpliedExtsXTHeadVdot[] = {"v"};
 static const char *ImpliedExtsXsfvcp[] = {"zve32x"};
+static const char *ImpliedExtsXsfvfnrclipxfqf[] = {"zve32f"};
 static const char *ImpliedExtsXsfvfwmaccqqq[] = {"zve32f"};
 static const char *ImpliedExtsXsfvqmaccdod[] = {"zve32x"};
 static const char *ImpliedExtsXsfvqmaccqoq[] = {"zve32x"};
@@ -1064,6 +1066,7 @@ static constexpr ImpliedExtsEntry ImpliedExts[] = {
     {{"f"}, {ImpliedExtsF}},
     {{"v"}, {ImpliedExtsV}},
     {{"xsfvcp"}, {ImpliedExtsXsfvcp}},
+    {{"xsfvfnrclipxfqf"}, {ImpliedExtsXsfvfnrclipxfqf}},
     {{"xsfvfwmaccqqq"}, {ImpliedExtsXsfvfwmaccqqq}},
     {{"xsfvqmaccdod"}, {ImpliedExtsXsfvqmaccdod}},
     {{"xsfvqmaccqoq"}, {ImpliedExtsXsfvqmaccqoq}},

diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index fd9e278e471a159..f3a4945ca6285ef 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -553,6 +553,9 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
     TRY_TO_DECODE_FEATURE(
         RISCV::FeatureVendorXSfvfwmaccqqq, DecoderTableXSfvfwmaccqqq32,
         "SiFive Matrix Multiplication Instruction opcode table");
+    TRY_TO_DECODE_FEATURE(
+        RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32,
+        "SiFive FP32-to-int8 Ranged Clip Instructions opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfcie, DecoderTableXSfcie32,
                           "Sifive CIE custom opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXCVbitmanip,

diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 88ebf1dca348007..1bcf190a583de18 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -844,6 +844,14 @@ def HasVendorXSfvfwmaccqqq : Predicate<"Subtarget->hasVendorXSfvfwmaccqqq()">,
                          AssemblerPredicate<(all_of FeatureVendorXSfvfwmaccqqq),
                          "'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction and 4-by-4))">;
 
+def FeatureVendorXSfvfnrclipxfqf
+    : SubtargetFeature<"xsfvfnrclipxfqf", "HasVendorXSfvfnrclipxfqf", "true",
+                       "'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)",
+                       [FeatureStdExtZve32f]>;
+def HasVendorXSfvfnrclipxfqf : Predicate<"Subtarget->hasVendorXSfvfnrclipxfqf()">,
+                               AssemblerPredicate<(all_of FeatureVendorXSfvfnrclipxfqf),
+                               "'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)">;
+
 def FeatureVendorXCVbitmanip
     : SubtargetFeature<"xcvbitmanip", "HasVendorXCVbitmanip", "true",
                        "'XCVbitmanip' (CORE-V Bit Manipulation)">;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index 146786eb76c9473..56e59a164198948 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -176,6 +176,11 @@ class CustomSiFiveVMACC<bits<6> funct6, RISCVVFormat opv, string opcodestr>
 }
 }
 
+class CustomSiFiveVFNRCLIP<bits<6> funct6, RISCVVFormat opv, string opcodestr>
+    : VALUVF<funct6, opv, opcodestr> {
+  let Inst{6-0} = OPC_CUSTOM_2.Value;
+}
+
 let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
     hasSideEffects = 1, hasNoSchedulingInfo = 1, DecoderNamespace = "XSfvcp" in {
   defm X   : CustomSiFiveVCIX<"x",   VCIX_X,   uimm5, uimm5, GPR>,   Sched<[]>;
@@ -212,6 +217,10 @@ let Predicates = [HasVendorXSfvfwmaccqqq], DecoderNamespace = "XSfvfwmaccqqq" in
   def VFWMACC_4x4x4 : CustomSiFiveVMACC<0b111100, OPFVV, "sf.vfwmacc.4x4x4">;
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvfnrclipxfqf" in {
+  def VFNRCLIP_XU_F_QF : CustomSiFiveVFNRCLIP<0b100010, OPFVF, "sf.vfnrclip.xu.f.qf">;
+  def VFNRCLIP_X_F_QF : CustomSiFiveVFNRCLIP<0b100011, OPFVF, "sf.vfnrclip.x.f.qf">;
+}
 class VPseudoVC_X<Operand OpClass, DAGOperand RS1Class,
                   bit HasSideEffect = 1> :
       Pseudo<(outs),
@@ -350,6 +359,16 @@ multiclass VPseudoSiFiveVFWMACC<string Constraint = ""> {
     defm NAME : VPseudoSiFiveVMACC<m.MX, m.wvrclass, m.vrclass, Constraint>;
 }
 
+multiclass VPseudoSiFiveVFNRCLIP<string Constraint = "@earlyclobber $rd"> {
+  foreach m = MxListVF4 in
+    let hasSideEffects = 0 in
+      defm "Pseudo" # NAME : VPseudoBinaryRoundingMode<!if(!eq(m.vrclass, VRM8),
+                                                           VRM2, VR),
+                                                       m.vrclass, FPR32, m,
+                                                       Constraint, /*sew*/0,
+                                                       UsesVXRM=0>;
+}
+
 let Predicates = [HasVendorXSfvcp] in {
   foreach m = MxList in {
     defm X : VPseudoVC_X<m, GPR>;
@@ -396,6 +415,11 @@ let Predicates = [HasVendorXSfvfwmaccqqq], DecoderNamespace = "XSfvfwmaccqqq" in
   defm VFWMACC_4x4x4 : VPseudoSiFiveVFWMACC;
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvfnrclipxfqf" in {
+  defm VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
+  defm VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
+}
+
 class VPatVC_OP4<string intrinsic_name,
                  string inst,
                  ValueType op2_type,
@@ -555,6 +579,26 @@ multiclass VPatVFWMACC<string intrinsic, string instruction, string kind>
     : VPatVMACC<intrinsic, instruction, kind, AllWidenableBFloatToFloatVectors,
                 vbfloat16m1_t>;
 
+defset list<VTypeInfoToWide> VFNRCLIPInfoPairs = {
+  def : VTypeInfoToWide<VI8MF8, VF32MF2>;
+  def : VTypeInfoToWide<VI8MF4, VF32M1>;
+  def : VTypeInfoToWide<VI8MF2, VF32M2>;
+  def : VTypeInfoToWide<VI8M1,  VF32M4>;
+  def : VTypeInfoToWide<VI8M2,  VF32M8>;
+}
+
+multiclass VPatVFNRCLIP<string intrinsic, string instruction> {
+  foreach pair = VFNRCLIPInfoPairs in {
+    defvar Vti = pair.Vti;
+    defvar Wti = pair.Wti;
+    defm : VPatBinaryRoundingMode<"int_riscv_sf_" # intrinsic,
+                                  "Pseudo" # instruction # "_" # Wti.LMul.MX,
+                                  Vti.Vector, Wti.Vector, Wti.Scalar, Vti.Mask,
+                                  Vti.Log2SEW, Vti.RegClass,
+                                  Wti.RegClass, Wti.ScalarRegClass>;
+  }
+}
+
 let Predicates = [HasVendorXSfvcp] in {
   foreach vti = AllVectors in {
     defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
@@ -608,6 +652,11 @@ let Predicates = [HasVendorXSfvfwmaccqqq] in {
   defm : VPatVFWMACC<"vfwmacc_4x4x4", "VFWMACC", "4x4x4">;
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf] in {
+  defm : VPatVFNRCLIP<"vfnrclip_xu_f_qf", "VFNRCLIP_XU_F_QF">;
+  defm : VPatVFNRCLIP<"vfnrclip_x_f_qf", "VFNRCLIP_X_F_QF">;
+}
+
 let Predicates = [HasVendorXSfcie] in {
 let hasSideEffects = 1, mayLoad = 0, mayStore = 0, DecoderNamespace = "XSfcie" in {
 def SF_CFLUSH_D_L1 : RVInstI<0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1), "cflush.d.l1","$rs1">,

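[Editor's note: VFNRCLIPInfoPairs encodes the narrowing relationship: each i8 destination LMUL (MF8 through M2) is paired with an f32 source at four times that LMUL (MF2 through M8), since every 32-bit source element narrows to an 8-bit result. At the C level that pairing surfaces in the autogenerated builtins tested by this commit; the sketch below assumes the <sifive_vector.h> header and the __riscv_sf_vfnrclip_x_f_qf_i8mf8 spelling suggested by the added test-file names, so treat both as assumptions rather than a stable API reference.]

#include <sifive_vector.h>  /* assumed header for SiFive vendor builtins */

/* Narrow a vfloat32mf2_t to vint8mf8_t (the smallest pair in
 * VFNRCLIPInfoPairs): each element is scaled by the f32 scalar, rounded
 * per the current frm, and saturated to the int8 range, per the linked
 * SiFive spec. */
vint8mf8_t narrow_clip(vfloat32mf2_t src, float scale, size_t vl) {
  return __riscv_sf_vfnrclip_x_f_qf_i8mf8(src, scale, vl);
}
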
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
new file mode 100644
index 000000000000000..b4f4a879a0b57f2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v10, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v12, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v16, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv2r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    float %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}

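[Editor's note: for readers decoding the IR above, the unmasked intrinsic takes (passthru, vs2, f32 scalar, frm, vl), and the masked form appends the mask plus a policy operand; that is why every call passes `iXLen 0` (round-to-nearest-even) ahead of the `vl`. The clang-level `_rm` builtins exercised by the new `*_rm.c` tests expose that rounding-mode operand directly; the sketch below assumes the suffix spelling implied by those file names.]

#include <sifive_vector.h>  /* assumed header, as above */

/* Hypothetical call of the explicit-rounding-mode builtin; the literal 0
 * selects round-to-nearest-even, matching the `iXLen 0` frm operand in
 * the IR tests above. */
vint8mf8_t narrow_clip_rne(vfloat32mf2_t src, float scale, size_t vl) {
  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(src, scale, /*frm=*/0, vl);
}
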
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
new file mode 100644
index 000000000000000..363cccd5ad3562c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v10, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v12, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v16, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv2r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    float %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}

diff --git a/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s b/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s
new file mode 100644
index 000000000000000..dc9b7ef8bf61cab
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | llvm-objdump -d --mattr=+v,+xsfvfnrclipxfqf - \
+# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+sf.vfnrclip.xu.f.qf v4, v8, fa2
+# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8a]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8a <unknown>
+
+sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
+# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x88]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 88 <unknown>
+
+sf.vfnrclip.x.f.qf v4, v8, fa2
+# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8e]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8e <unknown>
+
+sf.vfnrclip.x.f.qf v4, v8, fa2, v0.t
+# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8c]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8c <unknown>

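[Editor's note: the CHECK-ENCODING bytes are little-endian, so the first unmasked sf.vfnrclip.xu.f.qf above is the word 0x8a86525b. That word decomposes exactly along the VALUVF fields set up by CustomSiFiveVFNRCLIP earlier in the patch: funct6 0b100010, vm=1 (unmasked), vs2=v8, rs1=fa2 (f12), funct3 0b101 (OPFVF), vd=v4, and the custom-2 opcode 0x5b. A small sketch that reassembles it, field layout per the standard RVV encoding; the program is illustrative only.]

#include <stdint.h>
#include <stdio.h>

/* Reassemble the first CHECK-ENCODING word above from its fields.
 * Layout: funct6[31:26] vm[25] vs2[24:20] rs1[19:15] funct3[14:12]
 *         vd[11:7] opcode[6:0]. */
int main(void) {
  uint32_t insn = (0x22u << 26)  /* funct6 0b100010: sf.vfnrclip.xu.f.qf */
                | (1u   << 25)   /* vm=1: unmasked                       */
                | (8u   << 20)   /* vs2 = v8                             */
                | (12u  << 15)   /* rs1 = fa2 (f12)                      */
                | (5u   << 12)   /* funct3 0b101: OPFVF                  */
                | (4u   << 7)    /* vd  = v4                             */
                | 0x5bu;         /* OPC_CUSTOM_2                         */
  printf("0x%08x\n", insn);      /* prints 0x8a86525b -> 5b 52 86 8a     */
  return 0;
}
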
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index 76740bba32f7209..462f78c94d59b1c 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -714,6 +714,7 @@ R"(All available -march extensions for RISC-V
     xcvsimd             1.0
     xsfcie              1.0
     xsfvcp              1.0
+    xsfvfnrclipxfqf     1.0
     xsfvfwmaccqqq       1.0
     xsfvqmaccdod        1.0
     xsfvqmaccqoq        1.0


        

