[clang] [RISCV] Support Xsfvfnrclipxfqf extensions (PR #68297)

Brandon Wu via cfe-commits cfe-commits at lists.llvm.org
Tue Oct 10 20:12:20 PDT 2023


https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/68297

>From db7e1f109a67ded3c345c76ee1e346ad6fb34dc6 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Mon, 2 Oct 2023 20:45:46 -0700
Subject: [PATCH 1/4] [RISCV] Support Xsfvfnrclipxfqf extensions

FP32-to-int8 Ranged Clip Instructions
https://sifive.cdn.prismic.io/sifive/0aacff47-f530-43dc-8446-5caa2260ece0_xsfvfnrclipxfqf-spec.pdf
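
Per the linked spec, each instruction scales an FP32 source element by a
scalar FP32 factor in rs1, rounds, and clips the result into (u)int8 range.
As a rough orientation (signatures taken from the tests added below; exact
semantics should be checked against the spec), a minimal use of the signed,
unmasked intrinsic looks like:

  #include <sifive_vector.h>

  // Narrow a scaled f32 vector to int8, clipping out-of-range values.
  // The plain form rounds under the dynamic frm CSR (codegen passes frm = 7);
  // the _rm forms take an explicit static rounding-mode operand instead.
  vint8mf8_t narrow(vfloat32mf2_t vs2, float rs1, size_t vl) {
    return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
  }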
---
 .../clang/Basic/riscv_sifive_vector.td        |  62 +++++
 .../clang/Basic/riscv_vector_common.td        |   1 +
 .../clang/Support/RISCVVIntrinsicUtils.h      |  18 +-
 clang/lib/Sema/SemaRISCVVectorLookup.cpp      |   1 +
 clang/lib/Support/RISCVVIntrinsicUtils.cpp    |   9 +-
 .../non-overloaded/sf_vfnrclip_x_f_qf.c       |  98 +++++++
 .../non-overloaded/sf_vfnrclip_x_f_qf_rm.c    |  98 +++++++
 .../non-overloaded/sf_vfnrclip_xu_f_qf.c      |  98 +++++++
 .../non-overloaded/sf_vfnrclip_xu_f_qf_rm.c   |  98 +++++++
 .../overloaded/sf_vfnrclip_x_f_qf.c           |  98 +++++++
 .../overloaded/sf_vfnrclip_x_f_qf_rm.c        |  98 +++++++
 .../overloaded/sf_vfnrclip_xu_f_qf.c          |  98 +++++++
 .../overloaded/sf_vfnrclip_xu_f_qf_rm.c       |  98 +++++++
 .../non-overloaded/sf_vfnrclip_x_f_qf.c       | 188 +++++++++++++
 .../non-overloaded/sf_vfnrclip_x_f_qf_rm.c    | 188 +++++++++++++
 .../non-overloaded/sf_vfnrclip_xu_f_qf.c      | 188 +++++++++++++
 .../non-overloaded/sf_vfnrclip_xu_f_qf_rm.c   | 188 +++++++++++++
 .../policy/overloaded/sf_vfnrclip_x_f_qf.c    | 188 +++++++++++++
 .../policy/overloaded/sf_vfnrclip_x_f_qf_rm.c | 188 +++++++++++++
 .../policy/overloaded/sf_vfnrclip_xu_f_qf.c   | 188 +++++++++++++
 .../overloaded/sf_vfnrclip_xu_f_qf_rm.c       | 188 +++++++++++++
 .../test/Sema/rvv-required-features-invalid.c |   8 +
 clang/test/Sema/rvv-required-features.c       |  11 +-
 clang/utils/TableGen/RISCVVEmitter.cpp        |   1 +
 llvm/include/llvm/IR/IntrinsicsRISCVXsf.td    |  28 ++
 llvm/lib/Support/RISCVISAInfo.cpp             |   3 +
 .../RISCV/Disassembler/RISCVDisassembler.cpp  |   2 +
 llvm/lib/Target/RISCV/RISCVFeatures.td        |   8 +
 llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td    |  49 ++++
 .../CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll   | 260 ++++++++++++++++++
 .../CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll  | 260 ++++++++++++++++++
 llvm/test/MC/RISCV/rvv/xsfvfnrclip.s          |  33 +++
 llvm/unittests/Support/RISCVISAInfoTest.cpp   |   1 +
 33 files changed, 3033 insertions(+), 10 deletions(-)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
 create mode 100644 llvm/test/MC/RISCV/rvv/xsfvfnrclip.s

diff --git a/clang/include/clang/Basic/riscv_sifive_vector.td b/clang/include/clang/Basic/riscv_sifive_vector.td
index 6583a7eb7b2e59b..6e171cc7618b03a 100644
--- a/clang/include/clang/Basic/riscv_sifive_vector.td
+++ b/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -103,3 +103,65 @@ let SupportOverloading = false in {
     defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"],  "UwKzUwUvFe", [-1, 0, 2, 3], UseGPR=0>;
   }
 }
+
+multiclass RVVVFNRCLIPBuiltinSet<string suffix, string prototype, string type_range> {
+  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+      Name = NAME,
+      IRName = NAME,
+      MaskedIRName = NAME # "_mask" in
+  def : RVVConvBuiltin<suffix, prototype, type_range, NAME>;
+}
+
+let UnMaskedPolicyScheme = HasPassthruOperand, RequiredFeatures = ["Xsfvfnrclipxfqf"] in {
+let ManualCodegen = [{
+  {
+    // LLVM intrinsic
+    // Unmasked: (passthru, vector_in, scalar_in, frm, vl)
+    // Masked:   (passthru, vector_in, scalar_in, mask, frm, vl, policy)
+
+    SmallVector<llvm::Value*, 7> Operands;
+    bool HasMaskedOff = !(
+        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+        (!IsMasked && PolicyAttrs & RVV_VTA));
+    bool HasRoundModeOp = IsMasked ?
+      (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+      (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+    unsigned Offset = IsMasked ?
+        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+    if (!HasMaskedOff)
+      Operands.push_back(llvm::PoisonValue::get(ResultType));
+    else
+      Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+    Operands.push_back(Ops[Offset]); // op0
+    Operands.push_back(Ops[Offset + 1]); // op1
+
+    if (IsMasked)
+      Operands.push_back(Ops[0]); // mask
+
+    if (HasRoundModeOp) {
+      Operands.push_back(Ops[Offset + 2]); // frm
+      Operands.push_back(Ops[Offset + 3]); // vl
+    } else {
+      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+      Operands.push_back(Ops[Offset + 2]); // vl
+    }
+
+    if (IsMasked)
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+    IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Operands.back()->getType()};
+    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+    return Builder.CreateCall(F, Operands, "");
+  }
+}] in {
+  let HasFRMRoundModeOp = true in {
+    defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqfu", "c">;
+    defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqfu", "c">;
+  }
+  defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqf", "c">;
+  defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqf", "c">;
+}
+}
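
For reference, the block above yields four C-level flavors per type/LMUL
combination, all exercised by the tests below. A sketch of their shapes
(argument order as in the tests; frm appears only on the _rm forms, and the
mask is prepended on the _m forms):

  // Sketch only: the four intrinsic shapes generated above, at i8mf8.
  void sketch(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
    // Dynamic rounding: codegen passes frm = 7 (DYN) to the intrinsic.
    vint8mf8_t r0 = __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
    vint8mf8_t r1 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl);
    // Static rounding-mode operand (the tests pass 2, i.e. RDN).
    vint8mf8_t r2 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);
    vint8mf8_t r3 = __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);
  }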
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 141fac9d68e6d54..326c3883f0a8409 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -66,6 +66,7 @@
 //   t: ptrdiff_t, ignores "t"
 //   u: unsigned long, ignores "t"
 //   l: long, ignores "t"
+//   f: float32, ignores "t"
 //
 // So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
 // will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 8ba57d77221dc52..9de83028f85e496 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -85,6 +85,7 @@ enum class BaseTypeModifier : uint8_t {
   Ptrdiff,
   UnsignedLong,
   SignedLong,
+  Float32
 };
 
 // Modifier for type, used for both scalar and vector types.
@@ -485,14 +486,15 @@ enum RVVRequire : uint16_t {
   RVV_REQ_RV64 = 1 << 0,
   RVV_REQ_ZvfhminOrZvfh = 1 << 1,
   RVV_REQ_Xsfvcp = 1 << 2,
-  RVV_REQ_Zvbb = 1 << 3,
-  RVV_REQ_Zvbc = 1 << 4,
-  RVV_REQ_Zvkb = 1 << 5,
-  RVV_REQ_Zvkg = 1 << 6,
-  RVV_REQ_Zvkned = 1 << 7,
-  RVV_REQ_Zvknha = 1 << 8,
-  RVV_REQ_Zvksed = 1 << 9,
-  RVV_REQ_Zvksh = 1 << 10,
+  RVV_REQ_Xsfvfnrclipxfqf = 1 << 3,
+  RVV_REQ_Zvbb = 1 << 4,
+  RVV_REQ_Zvbc = 1 << 5,
+  RVV_REQ_Zvkb = 1 << 6,
+  RVV_REQ_Zvkg = 1 << 7,
+  RVV_REQ_Zvkned = 1 << 8,
+  RVV_REQ_Zvknha = 1 << 9,
+  RVV_REQ_Zvksed = 1 << 10,
+  RVV_REQ_Zvksh = 1 << 11,
 
   LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Zvksh)
 };
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
index ae584dc68719901..6440e379d2d5036 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -205,6 +205,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
   static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
       {"64bit", RVV_REQ_RV64},
       {"xsfvcp", RVV_REQ_Xsfvcp},
+      {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
       {"experimental-zvbb", RVV_REQ_Zvbb},
       {"experimental-zvbc", RVV_REQ_Zvbc},
       {"experimental-zvkb", RVV_REQ_Zvkb},
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index c105db434dc43c9..bc366cb2d99056d 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -430,6 +430,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
   case 'l':
     PT = BaseTypeModifier::SignedLong;
     break;
+  case 'f':
+    PT = BaseTypeModifier::Float32;
+    break;
   default:
     llvm_unreachable("Illegal primitive type transformers!");
   }
@@ -666,6 +669,10 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
   case BaseTypeModifier::SignedLong:
     ScalarType = ScalarTypeKind::SignedLong;
     break;
+  case BaseTypeModifier::Float32:
+    ElementBitwidth = 32;
+    ScalarType = ScalarTypeKind::Float;
+    break;
   case BaseTypeModifier::Invalid:
     ScalarType = ScalarTypeKind::Invalid;
     return;
@@ -1150,7 +1157,7 @@ void RVVIntrinsic::updateNamesAndPolicy(
 
 SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
   SmallVector<PrototypeDescriptor> PrototypeDescriptors;
-  const StringRef Primaries("evwqom0ztul");
+  const StringRef Primaries("evwqom0ztulf");
   while (!Prototypes.empty()) {
     size_t Idx = 0;
     // Skip over complex prototype because it could contain primitive type
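
With the new 'f' primitive in place, one reading of the prototype string
"vFqfu" used above — this is a decoding inferred from the test signatures,
with type_range "c" (int8) and vl appended automatically by the framework:

  // Inferred decoding of "vFqfu" for the _rm C signature:
  //   v  -> return:  int8 vector               (vint8mf8_t ... vint8m2_t)
  //   Fq -> operand: quad-widened, made float  (vfloat32mf2_t ... m8 vs2)
  //   f  -> operand: float32 scalar (new here) (float rs1)
  //   u  -> operand: unsigned long             (rounding mode)
  vint8mf8_t r = __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);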
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..9b6e2f60f3fc9e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_m(mask, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..5e4a52ca5f87238
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..266356e07f0ba08
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_m(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_m(mask, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..45b6778eea8b84d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_m(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_m(mask, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..849dcbb6a56f996
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..9b3fa1b88c1b87e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf(mask, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..4ab3311d453c23e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..be4e954893fe2df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8(vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4(vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2(vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1(vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2(vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> poison, <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_m(vbool64_t mask, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> poison, <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_m(vbool32_t mask, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> poison, <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_m(vbool16_t mask, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> poison, <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_m(vbool8_t mask, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_m(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> poison, <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 3)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_m(vbool4_t mask, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf(mask, vs2, rs1, 2, vl);
+}
+
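A note for readers of the checks above: the i64 constant just ahead of the VL operand is the floating-point rounding mode. The unsuffixed intrinsics encode 7 (FRM_DYN, i.e. use the current frm CSR), while the _rm variants forward the caller's explicit mode, here 2 (round down). A minimal caller sketch, assuming the __RISCV_FRM_* enumerators from the standard RVV rounding-mode intrinsics are usable here as well (clip_rdn is a hypothetical name, not part of this patch):

  #include <sifive_vector.h>

  // Clip FP32 down to unsigned 8-bit with round-down semantics;
  // __RISCV_FRM_RDN == 2, matching the constant used throughout
  // the _rm tests in this patch.
  vuint8mf8_t clip_rdn(vfloat32mf2_t v, float scale, size_t vl) {
    return __riscv_sf_vfnrclip_xu_f_qf(v, scale, __RISCV_FRM_RDN, vl);
  }
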
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..e4bffdeaf13b5d5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
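For the policy tests above, the trailing i64 on the masked intrinsic is the standard RVV tail/mask policy operand (bit 0 set = tail agnostic, bit 1 set = mask agnostic): _tum lowers to 2, _tumu to 0, _mu to 1, and the plain masked form to 3, as the autogenerated checks show. A minimal sketch of the tumu flavor, mirroring the non-overloaded usage in the file above (clip_tumu is a hypothetical name):

  #include <sifive_vector.h>

  // Tail-undisturbed, mask-undisturbed: inactive and tail elements
  // keep their values from dest, which is why maskedoff is passed
  // through and the trailing policy operand is 0 in the checks.
  vint8mf8_t clip_tumu(vbool64_t mask, vint8mf8_t dest,
                       vfloat32mf2_t v, float scale, size_t vl) {
    return __riscv_sf_vfnrclip_x_f_qf_i8mf8_tumu(mask, dest, v, scale, vl);
  }
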
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..c034a0fb31a3068
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf4_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8mf2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m1_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_i8m2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..808c5b73da40ab2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
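Also worth noting for review: since the instruction narrows FP32 sources to 8-bit results, each test pairs a float32 type with the integer type at one quarter of its LMUL (vfloat32mf2 -> v{u}int8mf8 up through vfloat32m8 -> v{u}int8m2), which is why destination coverage stops at m2. A hypothetical example at the widest pairing, assuming the four-argument _rm overload exercised above:

  #include <sifive_vector.h>

  // Quarter-LMUL narrowing: an FP32 m8 source yields an i8 m2 result.
  vint8m2_t clip_m8(vfloat32m8_t v, float scale, size_t vl) {
    return __riscv_sf_vfnrclip_x_f_qf(v, scale, __RISCV_FRM_RDN, vl);
  }
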
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..8c9240207f19013
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf8_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf4_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8mf2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m1_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_u8m2_rm_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
new file mode 100644
index 000000000000000..1e7a352646a4e82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
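A note on the policy suffixes exercised above: the encoded policy operand is
visible in the CHECK lines -- _tum lowers with a trailing policy operand of 2,
_tumu with 0, and _mu with 1, while _tu has no policy operand and gets its
tail-undisturbed behavior from the non-undef maskedoff operand. A minimal
usage sketch (illustrative names, not part of the patch), compiled with the
same target features as the RUN lines above:

    #include <sifive_vector.h>

    // Sketch: quantize one strip of floats to int8, leaving the tail
    // elements of `acc` undisturbed.
    vint8mf8_t quantize_tu(vint8mf8_t acc, vfloat32mf2_t v, float scale,
                           size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf_tu(acc, v, scale, vl);
    }
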
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
new file mode 100644
index 000000000000000..d6b1962618cfa4f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_x_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tu(vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tu(vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tu(vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tu(vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tu(vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_sf_vfnrclip_x_f_qf_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_sf_vfnrclip_x_f_qf_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_sf_vfnrclip_x_f_qf_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_sf_vfnrclip_x_f_qf_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_x_f_qf_i8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_sf_vfnrclip_x_f_qf_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_x_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
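The _rm file above differs from the non-_rm variants in one respect: the
builtins take an explicit static rounding-mode argument. The tests pass the
immediate 2 (RDN, round toward negative infinity in the standard frm
encoding), which appears as `i64 2` in the intrinsic calls, whereas the
non-_rm builtins leave the frontend to encode 7 (DYN, use the current frm
CSR). A hedged sketch:

    #include <sifive_vector.h>

    // Sketch: request round-toward-negative-infinity statically (frm = 2)
    // instead of the dynamic rounding mode used by the non-_rm forms.
    vint8mf8_t clip_rdn_tu(vint8mf8_t acc, vfloat32mf2_t v, float scale,
                           size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf_tu(acc, v, scale, /*frm=*/2, vl);
    }
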
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
new file mode 100644
index 000000000000000..890c3cfb2698488
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 7, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, vl);
+}
+
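The xu tests mirror the signed ones but return unsigned vectors: per the
extension's FP32-to-int8 ranged-clip semantics, sf.vfnrclip.xu.f.qf
saturates results to [0, 255] where sf.vfnrclip.x.f.qf saturates to
[-128, 127]. A minimal sketch using the non-policy overload:

    #include <sifive_vector.h>

    // Sketch: unsigned narrowing clip; results saturate to [0, 255].
    vuint8mf8_t quantize_u8(vfloat32mf2_t v, float scale, size_t vl) {
      return __riscv_sf_vfnrclip_xu_f_qf(v, scale, vl);
    }
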
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
new file mode 100644
index 000000000000000..573b98b2d5ee75c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/sf_vfnrclip_xu_f_qf_rm.c
@@ -0,0 +1,188 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvfnrclipxfqf \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | \
+// RUN:  opt -S -passes=mem2reg | FileCheck %s
+
+#include <sifive_vector.h>
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tu(vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tu(vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tu(vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tu(vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], i64 2, i64 [[VL:%.*]])
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tu(vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tu(maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tum(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 2)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tum(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_tumu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 0)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_tumu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf8_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_sf_vfnrclip_xu_f_qf_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat32mf2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf4_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_sf_vfnrclip_xu_f_qf_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat32m1_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8mf2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_sf_vfnrclip_xu_f_qf_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat32m2_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m1_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_sf_vfnrclip_xu_f_qf_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat32m4_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
+// CHECK-LABEL: @test_sf_vfnrclip_xu_f_qf_u8m2_mu(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VS2:%.*]], float [[RS1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 2, i64 [[VL:%.*]], i64 1)
+// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_sf_vfnrclip_xu_f_qf_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat32m8_t vs2, float rs1, size_t vl) {
+  return __riscv_sf_vfnrclip_xu_f_qf_mu(mask, maskedoff, vs2, rs1, 2, vl);
+}
+
diff --git a/clang/test/Sema/rvv-required-features-invalid.c b/clang/test/Sema/rvv-required-features-invalid.c
index 0d0d00764a31e39..6ac7a4e564fc4d4 100644
--- a/clang/test/Sema/rvv-required-features-invalid.c
+++ b/clang/test/Sema/rvv-required-features-invalid.c
@@ -15,3 +15,11 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz
 void test_xsfvcp_sf_vc_x_se_u64m1(uint64_t rs1, size_t vl) {
   __riscv_sf_vc_x_se_u64m1(1, 1, 1, rs1, vl); // expected-error {{call to undeclared function '__riscv_sf_vc_x_se_u64m1'}}
 }
+
+void test_xsfvfnrclipxfqf() {
+  __riscv_sf_vfnrclip_x_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_x_f_qf'}}
+}
+
+void test_xsfvfnrclipxufqf() {
+  __riscv_sf_vfnrclip_xu_f_qf(); // expected-error {{call to undeclared function '__riscv_sf_vfnrclip_xu_f_qf'}}
+}
diff --git a/clang/test/Sema/rvv-required-features.c b/clang/test/Sema/rvv-required-features.c
index c3b7965599e68fb..af1abfc9600a22c 100644
--- a/clang/test/Sema/rvv-required-features.c
+++ b/clang/test/Sema/rvv-required-features.c
@@ -1,5 +1,6 @@
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp %s -fsyntax-only -verify
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp \
+// RUN:     -target-feature +xsfvfnrclipxfqf %s -fsyntax-only -verify
 
 // expected-no-diagnostics
 
@@ -17,3 +18,11 @@ void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, siz
 void test_sf_vc_x_se_u64m1(uint64_t rs1, size_t vl) {
   __riscv_sf_vc_x_se_u64m1(1, 1, 1, rs1, vl);
 }
+
+void test_xsfvfnrclipxufqf(vfloat32m1_t vs1, float rs2, size_t vl) {
+  __riscv_sf_vfnrclip_xu_f_qf(vs1, rs2, vl);
+}
+
+void test_xsfvfnrclipxfqf(vfloat32m1_t vs1, float rs2, size_t vl) {
+  __riscv_sf_vfnrclip_x_f_qf(vs1, rs2, vl);
+}
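These Sema tests pin down the gating: the builtins are declared only when
xsfvfnrclipxfqf is among the target features. Assuming clang's usual
__riscv_<extension> predefine, derived from the supported-extension table
this patch extends (an assumption, not something this patch tests), user
code can guard on it:

    #include <sifive_vector.h>

    #if defined(__riscv_xsfvfnrclipxfqf) /* assumed predefine */
    vint8mf8_t clip(vfloat32mf2_t v, float scale, size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf(v, scale, vl);
    }
    #endif
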
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index 41025926058ed07..eaba8d80d217e88 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -656,6 +656,7 @@ void RVVEmitter::createRVVIntrinsics(
                                   .Case("RV64", RVV_REQ_RV64)
                                   .Case("ZvfhminOrZvfh", RVV_REQ_ZvfhminOrZvfh)
                                   .Case("Xsfvcp", RVV_REQ_Xsfvcp)
+                                  .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
                                   .Case("Zvbb", RVV_REQ_Zvbb)
                                   .Case("Zvbc", RVV_REQ_Zvbc)
                                   .Case("Zvkb", RVV_REQ_Zvkb)
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index c8d24ec7d83addf..6915cfec99d5f1e 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -128,8 +128,36 @@ let TargetPrefix = "riscv" in {
     }
   }
 
+  // Input: (passthru, vector_in, scalar_in, frm, vl)
+  class RISCVSFCustomVFNRCLIPUnMasked
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
+                     llvm_anyint_ty, LLVMMatchType<2>],
+                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
+
+  // Input: (maskedoff, vector_in, scalar_in, mask, frm, vl, policy)
+  class RISCVSFCustomVFNRCLIPMasked
+       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                   [LLVMMatchType<0>, llvm_anyfloat_ty, LLVMVectorElementType<1>,
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<2>, LLVMMatchType<2>],
+                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 5;
+  }
+
+  multiclass RISCVSFCustomVFNRCLIP {
+    def NAME : RISCVSFCustomVFNRCLIPUnMasked;
+    def NAME # "_mask" : RISCVSFCustomVFNRCLIPMasked;
+  }
+
   defm "" : RISCVSFCustomVC_X<["x", "i"]>;
   defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
   defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
   defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>;
+
+  // XSfvfnrclipxfqf
+  defm int_riscv_sf_vfnrclip_x_f_qf : RISCVSFCustomVFNRCLIP;
+  defm int_riscv_sf_vfnrclip_xu_f_qf : RISCVSFCustomVFNRCLIP;
 } // TargetPrefix = "riscv"
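The two classes fix the operand layouts the frontend must emit: unmasked
calls are (passthru, vector_in, scalar_in, frm, vl) with frm an ImmArg at
index 3 and VLOperand = 4; masked calls insert the mask and append a policy
operand. A sketch of how a C builtin lines up with the unmasked layout
(illustrative only; the non-_rm builtins have no frm parameter, so the
frontend supplies the immediate 7, as the earlier CHECK lines show):

    #include <sifive_vector.h>

    // Sketch: lowers to @llvm.riscv.sf.vfnrclip.x.f.qf with operands
    // (passthru, vector_in, scalar_in, frm, vl); frm = 7 is inserted by
    // the frontend for this non-_rm builtin.
    vint8mf8_t lower_example(vint8mf8_t passthru, vfloat32mf2_t vs2,
                             float rs1, size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf_tu(passthru, vs2, rs1, vl);
    }
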
diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index 72d33e1e65c8f58..36ee8b6790f3870 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -73,6 +73,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = {
     {"xcvsimd", RISCVExtensionVersion{1, 0}},
     {"xsfcie", RISCVExtensionVersion{1, 0}},
     {"xsfvcp", RISCVExtensionVersion{1, 0}},
+    {"xsfvfnrclipxfqf", RISCVExtensionVersion{1, 0}},
     {"xtheadba", RISCVExtensionVersion{1, 0}},
     {"xtheadbb", RISCVExtensionVersion{1, 0}},
     {"xtheadbs", RISCVExtensionVersion{1, 0}},
@@ -991,6 +992,7 @@ static const char *ImpliedExtsF[] = {"zicsr"};
 static const char *ImpliedExtsV[] = {"zvl128b", "zve64d"};
 static const char *ImpliedExtsXTHeadVdot[] = {"v"};
 static const char *ImpliedExtsXsfvcp[] = {"zve32x"};
+static const char *ImpliedExtsXsfvfnrclipxfqf[] = {"zve32f"};
 static const char *ImpliedExtsZacas[] = {"a"};
 static const char *ImpliedExtsZcb[] = {"zca"};
 static const char *ImpliedExtsZcd[] = {"d", "zca"};
@@ -1058,6 +1060,7 @@ static constexpr ImpliedExtsEntry ImpliedExts[] = {
     {{"f"}, {ImpliedExtsF}},
     {{"v"}, {ImpliedExtsV}},
     {{"xsfvcp"}, {ImpliedExtsXsfvcp}},
+    {{"xsfvfnrclipxfqf"}, {ImpliedExtsXsfvfnrclipxfqf}},
     {{"xtheadvdot"}, {ImpliedExtsXTHeadVdot}},
     {{"zacas"}, {ImpliedExtsZacas}},
     {{"zcb"}, {ImpliedExtsZcb}},
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index d561d90d3088c1a..75a93406ee71634 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -558,6 +558,9 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                           "XTHeadVdot custom opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfvcp, DecoderTableXSfvcp32,
                           "SiFive VCIX custom opcode table");
+    TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfvfnrclipxfqf,
+                          DecoderTableXSfvfnrclipxfqf32,
+                          "SiFive FP32-to-int8 Ranged Clip Instructions opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfcie, DecoderTableXSfcie32,
                           "Sifive CIE custom opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXCVbitmanip,
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 3d3486b7fa89563..01bc42232575dc7 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -813,6 +813,14 @@ def HasVendorXSfcie : Predicate<"Subtarget->hasVendorXSfcie()">,
                         AssemblerPredicate<(all_of FeatureVendorXSfcie),
                         "'XSfcie' (SiFive Custom Instruction Extension SCIE.)">;
 
+def FeatureVendorXSfvfnrclipxfqf
+    : SubtargetFeature<"xsfvfnrclipxfqf", "HasVendorXSfvfnrclipxfqf", "true",
+                       "'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)",
+                       [FeatureStdExtZve32f]>;
+def HasVendorXSfvfnrclipxfqf : Predicate<"Subtarget->hasVendorXSfvfnrclipxfqf()">,
+                               AssemblerPredicate<(all_of FeatureVendorXSfvfnrclipxfqf),
+                               "'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)">;
+
 def FeatureVendorXCVbitmanip
     : SubtargetFeature<"xcvbitmanip", "HasVendorXCVbitmanip", "true",
                        "'XCVbitmanip' (CORE-V Bit Manipulation)">;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index 3975b8426256ac7..62a3556581f64ea 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -178,6 +178,11 @@ multiclass CustomSiFiveVCIX<string suffix, VCIXType type,
                                              InTyRs1, 1>;
 }
 
+class CustomSiFiveVFNRCLIP<bits<6> funct6, RISCVVFormat opv, string opcodestr>
+    : VALUVF<funct6, opv, opcodestr> {
+  let Inst{6-0} = OPC_CUSTOM_2.Value;
+}
+
 let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
     hasSideEffects = 1, hasNoSchedulingInfo = 1, DecoderNamespace = "XSfvcp" in {
   defm X   : CustomSiFiveVCIX<"x",   VCIX_X,   uimm5, uimm5, GPR>,   Sched<[]>;
@@ -196,6 +201,11 @@ let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
   defm FVW : CustomSiFiveVCIX<"fvw", VCIX_XVW, VR,    VR,    FPR32>, Sched<[]>;
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf], DecoderNamespace = "XSfvfnrclipxfqf" in {
+  def VFNRCLIP_XU_F_QF : CustomSiFiveVFNRCLIP<0b100010, OPFVF, "sf.vfnrclip.xu.f.qf">;
+  def VFNRCLIP_X_F_QF : CustomSiFiveVFNRCLIP<0b100011, OPFVF, "sf.vfnrclip.x.f.qf">;
+}
+
 class VPseudoVC_X<Operand OpClass, DAGOperand RS1Class,
                   bit HasSideEffect = 1> :
       Pseudo<(outs),
@@ -318,6 +327,16 @@ multiclass VPseudoVC_XVW<LMULInfo m, DAGOperand RS1Class,
   }
 }
 
+multiclass VPseudoSiFiveVFNRCLIP<string Constraint = "@earlyclobber $rd"> {
+  foreach m = MxListVF4 in
+    let hasSideEffects = 0 in
+      defm "Pseudo" # NAME : VPseudoBinaryRoundingMode<!if(!eq(m.vrclass, VRM8),
+                                                           VRM2, VR),
+                                                       m.vrclass, FPR32, m,
+                                                       Constraint, /*sew*/0,
+                                                       UsesVXRM=0>;
+}
+
 let Predicates = [HasVendorXSfvcp] in {
   foreach m = MxList in {
     defm X : VPseudoVC_X<m, GPR>;
@@ -346,6 +365,11 @@ let Predicates = [HasVendorXSfvcp] in {
   }
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf] in {
+  defm VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
+  defm VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
+}
+
 class VPatVC_OP4<string intrinsic_name,
                  string inst,
                  ValueType op2_type,
@@ -476,6 +500,26 @@ class GetFTypeInfo<int Sew> {
                               !eq(Scalar, f64) : "FPR64");
 }
 
+defset list<VTypeInfoToWide> VFNRCLIPInfoPairs = {
+  def : VTypeInfoToWide<VI8MF8, VF32MF2>;
+  def : VTypeInfoToWide<VI8MF4, VF32M1>;
+  def : VTypeInfoToWide<VI8MF2, VF32M2>;
+  def : VTypeInfoToWide<VI8M1,  VF32M4>;
+  def : VTypeInfoToWide<VI8M2,  VF32M8>;
+}
+
+multiclass VPatVFNRCLIP<string intrinsic, string instruction> {
+  foreach pair = VFNRCLIPInfoPairs in {
+    defvar Vti = pair.Vti;
+    defvar Wti = pair.Wti;
+    defm : VPatBinaryRoundingMode<"int_riscv_sf_" # intrinsic,
+                                  "Pseudo" # instruction # "_" # Wti.LMul.MX,
+                                  Vti.Vector, Wti.Vector, Wti.Scalar, Vti.Mask,
+                                  Vti.Log2SEW, Vti.RegClass,
+                                  Wti.RegClass, Wti.ScalarRegClass>;
+  }
+}
+
 let Predicates = [HasVendorXSfvcp] in {
   foreach vti = AllVectors in {
     defm : VPatVC_X<"x", "X", vti, XLenVT, GPR>;
@@ -511,6 +555,11 @@ let Predicates = [HasVendorXSfvcp] in {
   }
 }
 
+let Predicates = [HasVendorXSfvfnrclipxfqf] in {
+  defm : VPatVFNRCLIP<"vfnrclip_xu_f_qf", "VFNRCLIP_XU_F_QF">;
+  defm : VPatVFNRCLIP<"vfnrclip_x_f_qf", "VFNRCLIP_X_F_QF">;
+}
+
 let Predicates = [HasVendorXSfcie] in {
 let hasSideEffects = 1, mayLoad = 0, mayStore = 0, DecoderNamespace = "XSfcie" in {
 def SF_CFLUSH_D_L1 : RVInstI<0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1), "cflush.d.l1","$rs1">,
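
For reference, the TableGen changes above cover all three layers: CustomSiFiveVFNRCLIP fixes the custom-2 opcode for the real encodings, VPseudoSiFiveVFNRCLIP instantiates rounding-mode pseudos for each f32 LMUL (with an earlyclobber destination, since the narrowed result must not overlap the wide source), and VPatVFNRCLIP selects the int_riscv_sf_* intrinsics over the i8/f32 pairings in VFNRCLIPInfoPairs. At the source level these surface as C intrinsics; a minimal sketch, assuming the __riscv_sf_vfnrclip_* spellings implied by the clang test names in this patch (the exact names and header are assumptions here):

    /* Sketch only: assumes <sifive_vector.h> declares
       __riscv_sf_vfnrclip_x_f_qf_i8mf2 for the VI8MF2/VF32M2 pairing
       from VFNRCLIPInfoPairs. */
    #include <sifive_vector.h>
    #include <stddef.h>

    /* Scale each f32 element by the scalar, convert with the current
       rounding mode, and saturate into signed i8 range. */
    vint8mf2_t quantize(vfloat32m2_t v, float scale, size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf_i8mf2(v, scale, vl);
    }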
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
new file mode 100644
index 000000000000000..b4f4a879a0b57f2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v10, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v12, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v16, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv2r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_x_f_qf_mask_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.x.f.qf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.x.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    float %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
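
Note that the iXLen 0 rounding-mode operand in each call above selects round-to-nearest-even, which is why the generated code brackets the instruction with fsrmi a0, 0 / fsrm a0. The companion *_rm.c clang tests in this patch expose that operand directly; a sketch of that flavor, assuming an _rm intrinsic suffix and the __RISCV_FRM_RNE enumerator (both assumptions):

    /* Sketch only: explicit rounding-mode variant for the
       VI8MF8/VF32MF2 pairing; __RISCV_FRM_RNE (0) matches the
       "iXLen 0" operand in the IR tests above. */
    vint8mf8_t quantize_rne(vfloat32mf2_t v, float scale, size_t vl) {
      return __riscv_sf_vfnrclip_x_f_qf_i8mf8_rm(v, scale,
                                                 __RISCV_FRM_RNE, vl);
    }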
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
new file mode 100644
index 000000000000000..363cccd5ad3562c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfnrclipxfqf \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+  <vscale x 1 x i8>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv1i8.nxv1f32.iXLen(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v9, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+  <vscale x 2 x i8>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv2i8.nxv2f32.iXLen(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v10, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+  <vscale x 4 x i8>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv4i8.nxv4f32.iXLen(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v12, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+  <vscale x 8 x i8>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv8i8.nxv8f32.iXLen(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v16, v8, fa0
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv2r.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    float %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+  <vscale x 16 x i8>,
+  <vscale x 16 x float>,
+  float,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x i8> @intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfnrclip_xu_f_qf_mask_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    sf.vfnrclip.xu.f.qf v8, v16, fa0, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.sf.vfnrclip.xu.f.qf.mask.nxv16i8.nxv16f32.iXLen(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    float %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
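
The .mask intrinsics above additionally take the passthru, the mask, and a trailing policy operand (iXLen 1 here, matching the ta, mu vsetvli in the checks). A masked-form sketch in C, assuming the conventional _m suffix from the RVV intrinsic conventions (the name and exact policy mapping are assumptions):

    /* Sketch only: masked unsigned clip for the VI8M1/VF32M4 pairing;
       inactive elements are handled per the intrinsic's mask policy. */
    vuint8m1_t quantize_masked(vbool8_t m, vfloat32m4_t v,
                               float scale, size_t vl) {
      return __riscv_sf_vfnrclip_xu_f_qf_i8m1_m(m, v, scale, vl);
    }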
diff --git a/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s b/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s
new file mode 100644
index 000000000000000..dc9b7ef8bf61cab
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/xsfvfnrclip.s
@@ -0,0 +1,33 @@
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | llvm-objdump -d --mattr=+v,+xsfvfnrclipxfqf - \
+# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+v,+xsfvfnrclipxfqf %s \
+# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+sf.vfnrclip.xu.f.qf v4, v8, fa2
+# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8a]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8a <unknown>
+
+sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
+# CHECK-INST: sf.vfnrclip.xu.f.qf v4, v8, fa2, v0.t
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x88]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 88 <unknown>
+
+sf.vfnrclip.x.f.qf v4, v8, fa2
+# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8e]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8e <unknown>
+
+sf.vfnrclip.x.f.qf v4, v8, fa2, v0.t
+# CHECK-INST: sf.vfnrclip.x.f.qf v4, v8, fa2, v0.t
+# CHECK-ENCODING: [0x5b,0x52,0x86,0x8c]
+# CHECK-ERROR: instruction requires the following: 'XSfvfnrclipxfqf' (FP32-to-int8 Ranged Clip Instructions)
+# CHECK-UNKNOWN: 5b 52 86 8c <unknown>
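
The CHECK-ENCODING byte lists can be cross-checked against the CustomSiFiveVFNRCLIP layout by hand: the four bytes form a little-endian word in the custom-2 opcode space. A small standalone decoder (plain C, written for this note) splits the first encoding into its OPFVF fields:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* [0x5b,0x52,0x86,0x8a] little-endian:
         sf.vfnrclip.xu.f.qf v4, v8, fa2 (unmasked) */
      uint32_t insn = 0x8a86525bu;
      printf("opcode 0x%02x (custom-2)\n", (unsigned)(insn & 0x7f));
      printf("vd     v%u\n", (unsigned)((insn >> 7) & 0x1f));        /* v4 */
      printf("funct3 %u (OPFVF)\n", (unsigned)((insn >> 12) & 0x7));
      printf("rs1    f%u (fa2)\n", (unsigned)((insn >> 15) & 0x1f)); /* f12 */
      printf("vs2    v%u\n", (unsigned)((insn >> 20) & 0x1f));       /* v8 */
      printf("vm     %u (1 = unmasked)\n", (unsigned)((insn >> 25) & 0x1));
      printf("funct6 0x%02x (0b100010)\n", (unsigned)((insn >> 26) & 0x3f));
      return 0;
    }

Flipping vm to 0 yields the masked encoding 0x8886525b ([0x5b,0x52,0x86,0x88]), and funct6 0b100011 yields the signed sf.vfnrclip.x.f.qf forms, exactly as the remaining checks show.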
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index 90e26a23e87c205..d5a3a93e32bb246 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -713,6 +713,7 @@ R"(All available -march extensions for RISC-V
     xcvsimd             1.0
     xsfcie              1.0
     xsfvcp              1.0
+    xsfvfnrclipxfqf     1.0
     xtheadba            1.0
     xtheadbb            1.0
     xtheadbs            1.0

>From 1ab3fd44c37814403d592da0f864f6cb378fe6ef Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Thu, 5 Oct 2023 03:22:43 -0700
Subject: [PATCH 2/4] fixup! [RISCV] Support Xsfvfnrclipxfqf extensions

---
 clang/utils/TableGen/RISCVVEmitter.cpp        | 29 ++++++++++---------
 .../RISCV/Disassembler/RISCVDisassembler.cpp  |  5 ++--
 llvm/unittests/Support/RISCVISAInfoTest.cpp   |  2 +-
 3 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index eaba8d80d217e88..4d5e947f4ea57b3 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -652,20 +652,21 @@ void RVVEmitter::createRVVIntrinsics(
 
     SR.RequiredExtensions = 0;
     for (auto RequiredFeature : RequiredFeatures) {
-      RVVRequire RequireExt = StringSwitch<RVVRequire>(RequiredFeature)
-                                  .Case("RV64", RVV_REQ_RV64)
-                                  .Case("ZvfhminOrZvfh", RVV_REQ_ZvfhminOrZvfh)
-                                  .Case("Xsfvcp", RVV_REQ_Xsfvcp)
-                                  .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
-                                  .Case("Zvbb", RVV_REQ_Zvbb)
-                                  .Case("Zvbc", RVV_REQ_Zvbc)
-                                  .Case("Zvkb", RVV_REQ_Zvkb)
-                                  .Case("Zvkg", RVV_REQ_Zvkg)
-                                  .Case("Zvkned", RVV_REQ_Zvkned)
-                                  .Case("Zvknha", RVV_REQ_Zvknha)
-                                  .Case("Zvksed", RVV_REQ_Zvksed)
-                                  .Case("Zvksh", RVV_REQ_Zvksh)
-                                  .Default(RVV_REQ_None);
+      RVVRequire RequireExt =
+          StringSwitch<RVVRequire>(RequiredFeature)
+              .Case("RV64", RVV_REQ_RV64)
+              .Case("ZvfhminOrZvfh", RVV_REQ_ZvfhminOrZvfh)
+              .Case("Xsfvcp", RVV_REQ_Xsfvcp)
+              .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
+              .Case("Zvbb", RVV_REQ_Zvbb)
+              .Case("Zvbc", RVV_REQ_Zvbc)
+              .Case("Zvkb", RVV_REQ_Zvkb)
+              .Case("Zvkg", RVV_REQ_Zvkg)
+              .Case("Zvkned", RVV_REQ_Zvkned)
+              .Case("Zvknha", RVV_REQ_Zvknha)
+              .Case("Zvksed", RVV_REQ_Zvksed)
+              .Case("Zvksh", RVV_REQ_Zvksh)
+              .Default(RVV_REQ_None);
       assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
       SR.RequiredExtensions |= RequireExt;
     }
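
The reflowed switch still just maps each RequiredFeatures string onto one RVVRequire bit and ORs it into SR.RequiredExtensions. The consuming side is the usual bitmask containment test; roughly, in illustrative C (these are not the actual RVVRequire enumerators):

    enum {
      REQ_RV64            = 1u << 0,
      REQ_Xsfvcp          = 1u << 1,
      REQ_Xsfvfnrclipxfqf = 1u << 2,
      /* ... */
    };

    /* An intrinsic is usable only if every bit it requires is among
       the extensions enabled for the current target. */
    static int required_exts_satisfied(unsigned required, unsigned enabled) {
      return (required & enabled) == required;
    }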
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 75a93406ee71634..3a7928e49d9a696 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -558,8 +558,9 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                           "XTHeadVdot custom opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfvcp, DecoderTableXSfvcp32,
                           "SiFive VCIX custom opcode table");
-    TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32,
-                          "SiFive FP32-to-int8 Ranged Clip Instructions opcode table");
+    TRY_TO_DECODE_FEATURE(
+        RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32,
+        "SiFive FP32-to-int8 Ranged Clip Instructions opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXSfcie, DecoderTableXSfcie32,
                           "Sifive CIE custom opcode table");
     TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXCVbitmanip,
diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index d5a3a93e32bb246..430a4b5247ef9ea 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -630,7 +630,7 @@ TEST(getTargetFeatureForExtension, RetrieveTargetFeatureFromOneExt) {
 
 TEST(RiscvExtensionsHelp, CheckExtensions) {
   std::string ExpectedOutput =
-R"(All available -march extensions for RISC-V
+      R"(All available -march extensions for RISC-V
 
     Name                Version   Description
     i                   2.1       This is a long dummy description

>From 788d5606ad178fccaf28d647a9aa0e80f82934cd Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Fri, 6 Oct 2023 10:17:08 -0700
Subject: [PATCH 3/4] fixup! [RISCV] Support Xsfvfnrclipxfqf extensions

---
 llvm/unittests/Support/RISCVISAInfoTest.cpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp
index 430a4b5247ef9ea..f3e704b638eaeb6 100644
--- a/llvm/unittests/Support/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp
@@ -629,8 +629,9 @@ TEST(getTargetFeatureForExtension, RetrieveTargetFeatureFromOneExt) {
 }
 
 TEST(RiscvExtensionsHelp, CheckExtensions) {
+  // clang-format off
   std::string ExpectedOutput =
-      R"(All available -march extensions for RISC-V
+R"(All available -march extensions for RISC-V
 
     Name                Version   Description
     i                   2.1       This is a long dummy description
@@ -756,6 +757,7 @@ Experimental extensions
 
 Use -march to specify the target's extension.
 For example, clang -march=rv32i_v1p0)";
+  // clang-format on
 
   StringMap<StringRef> DummyMap;
   DummyMap["i"] = "This is a long dummy description";

>From eb0dda13fc480ff213d743c85c6a905175e1267f Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Tue, 10 Oct 2023 20:12:04 -0700
Subject: [PATCH 4/4] fixup! [RISCV] Support Xsfvfnrclipxfqf extensions

---
 clang/test/Preprocessor/riscv-target-features.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 242197e3f129a3f..6b0026da40acc1f 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -33,6 +33,7 @@
 // CHECK-NOT: __riscv_xcvsimd {{.*$}}
 // CHECK-NOT: __riscv_xsfcie {{.*$}}
 // CHECK-NOT: __riscv_xsfvcp {{.*$}}
+// CHECK-NOT: __riscv_xsfvfnrclipxfqf {{.*$}}
 // CHECK-NOT: __riscv_xtheadba {{.*$}}
 // CHECK-NOT: __riscv_xtheadbb {{.*$}}
 // CHECK-NOT: __riscv_xtheadbs {{.*$}}
@@ -323,6 +324,14 @@
 // RUN: -o - | FileCheck --check-prefix=CHECK-XSFVCP-EXT %s
 // CHECK-XSFVCP-EXT: __riscv_xsfvcp 1000000{{$}}
 
+// RUN: %clang --target=riscv32-unknown-linux-gnu \
+// RUN: -march=rv32ixsfvfnrclipxfqf -x c -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
+// RUN: %clang --target=riscv64-unknown-linux-gnu \
+// RUN: -march=rv64ixsfvfnrclipxfqf -x c -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-XSFVFNRCLIPXFQF-EXT %s
+// CHECK-XSFVFNRCLIPXFQF-EXT: __riscv_xsfvfnrclipxfqf 1000000{{$}}
+
 // RUN: %clang --target=riscv32-unknown-linux-gnu \
 // RUN: -march=rv32ixtheadba -x c -E -dM %s \
 // RUN: -o - | FileCheck --check-prefix=CHECK-XTHEADBA-EXT %s
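
With this test in place, user code can gate on the same macro; the value encodes the extension version (1.0 as 1000000). A sketch:

    #if defined(__riscv_xsfvfnrclipxfqf)
    /* Extension available: the sf.vfnrclip intrinsics may be used. */
    #else
    /* Fallback: scale, round, and clamp with ordinary RVV or scalar code. */
    #endif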


