[llvm] [llvm][RISCV] Support Zvfofp8min llvm intrinsics and codegen (PR #172585)

Brandon Wu via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 23 18:06:24 PST 2025


https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/172585

>From 39cb90eafff71043e533293f338a1a44a191a3e6 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Mon, 15 Dec 2025 01:54:20 -0800
Subject: [PATCH 1/5] [llvm][RISCV] Support Zvfofp8min llvm intrinsics and
 codegen

This is a follow-up patch to https://github.com/llvm/llvm-project/pull/157014,
adding support for the LLVM intrinsics and codegen.
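
For reference, a minimal sketch of how the new unmasked intrinsics are invoked
from LLVM IR. The signature is taken from the tests added below; those tests
spell the XLen type as iXLen and sed it to i32/i64 per target, so the i64 here
is just the riscv64 instantiation:

  declare <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
    <vscale x 1 x i8>, <vscale x 1 x float>, i64, i64)

  ; operands: passthru, FP32 source, frm (7 = dynamic rounding mode), vl
  %r = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
    <vscale x 1 x i8> undef, <vscale x 1 x float> %src, i64 7, i64 %vl)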
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  20 +
 .../RISCV/MCTargetDesc/RISCVInstPrinter.cpp   |   4 +-
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |   4 +-
 .../Target/RISCV/RISCVInstrInfoZvfofp8min.td  | 127 ++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll     | 360 +++++++++++++++++-
 .../CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll   | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll | 227 +++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll     | 200 +++++++++-
 10 files changed, 2003 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 77fcc46ea5a89..9088e5e6a357b 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1958,6 +1958,26 @@ let TargetPrefix = "riscv" in {
 let TargetPrefix = "riscv" in
 def int_riscv_pause : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
 
+
+//===----------------------------------------------------------------------===//
+// Zvfofp8min - OFP8 conversion extension
+// The Zvfofp8min extension provides basic support for the two 8-bit
+// floating-point formats defined in the Open Compute Project OFP8
+// specification, OFP8 E4M3 and OFP8 E5M2.
+let TargetPrefix = "riscv" in {
+  // OFP8 to BF16 conversion instructions
+  defm vfwcvt_f_f_v_alt : RISCVConversion;
+  // BF16 to OFP8 conversion instructions
+  defm vfncvt_sat_f_f_w : RISCVConversionRoundingMode;
+  defm vfncvt_f_f_w_alt : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_w_alt : RISCVConversionRoundingMode;
+  // FP32 to OFP8 conversion instructions
+  defm vfncvt_f_f_q : RISCVConversionRoundingMode;
+  defm vfncvt_f_f_q_alt : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_q : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_q_alt : RISCVConversionRoundingMode;
+} // TargetPrefix = "riscv"
+
 // Vendor extensions
 //===----------------------------------------------------------------------===//
 include "llvm/IR/IntrinsicsRISCVXTHead.td"
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index 7b9c4b3e800cd..f2c5f6947aa00 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -216,11 +216,13 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI, raw_ostream &O) {
   unsigned Imm = MI->getOperand(OpNo).getImm();
   // Print the raw immediate for reserved values: vlmul[2:0]=4, vsew[2:0]=0b1xx,
-  // altfmt=1 without zvfbfa extension, or non-zero in bits 9 and above.
+  // altfmt=1 without an extension that defines altfmt (zvfbfa, zvfofp8min, or
+  // XSfvfbfexp16e), or non-zero in bits 9 and above.
   if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED ||
       RISCVVType::getSEW(Imm) > 64 ||
       (RISCVVType::isAltFmt(Imm) &&
        !(STI.hasFeature(RISCV::FeatureStdExtZvfbfa) ||
+         STI.hasFeature(RISCV::FeatureStdExtZvfofp8min) ||
          STI.hasFeature(RISCV::FeatureVendorXSfvfbfexp16e))) ||
       (Imm >> 9) != 0) {
     O << formatImm(Imm);
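
As a concrete illustration of the rule above (the immediate value assumes the
usual vtype layout: vlmul in bits 2:0, vsew in 5:3, vta/vma in bits 6/7, and
altfmt in bit 8):

  # with zvfbfa, zvfofp8min, or XSfvfbfexp16e, altfmt=1 prints symbolically
  vsetvli zero, a0, e8alt, mf8, ta, ma
  # without any of them that encoding (453 = 0x1C5) is reserved, so the raw
  # immediate is printed instead
  vsetvli zero, a0, 453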
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e07d7b5ee5563..a32f6a566493f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5851,8 +5851,9 @@ multiclass VPatConversionWF_VI<string intrinsic, string instruction,
 }
 
 multiclass VPatConversionWF_VF<string intrinsic, string instruction,
+                               list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
                                bit isSEWAware = 0> {
-  foreach fvtiToFWti = AllWidenableFloatVectors in {
+  foreach fvtiToFWti = wlist in {
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
     // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled.
@@ -7177,6 +7178,7 @@ defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU",
 defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X",
                            isSEWAware=1>;
 defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F",
+                           wlist=AllWidenableFloatVectors,
                            isSEWAware=1>;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
index 86cab697cbf55..b067488ea662f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
@@ -24,3 +24,130 @@ let Predicates = [HasStdExtZvfofp8min], Constraints = "@earlyclobber $vd",
   defm VFNCVT_F_F_Q : VNCVTF_FV_VS2<"vfncvt.f.f.q", 0b010010, 0b11001>;
   defm VFNCVT_SAT_F_F_Q : VNCVTF_FV_VS2<"vfncvt.sat.f.f.q", 0b010010, 0b11011>;
 }
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions
+//===----------------------------------------------------------------------===//
+defvar MxListQ = [V_MF8, V_MF4, V_MF2, V_M1, V_M2];
+
+defset list<VTypeInfoToWide> AllWidenableIntToBFloatVectors = {
+  def : VTypeInfoToWide<VI8MF8, VBF16MF4>;
+  def : VTypeInfoToWide<VI8MF4, VBF16MF2>;
+  def : VTypeInfoToWide<VI8MF2, VBF16M1>;
+  def : VTypeInfoToWide<VI8M1, VBF16M2>;
+  def : VTypeInfoToWide<VI8M2, VBF16M4>;
+  def : VTypeInfoToWide<VI8M4, VBF16M8>;
+}
+
+defset list<VTypeInfoToWide> AllWidenableInt8ToFloat32Vectors = {
+  def : VTypeInfoToWide<VI8MF8, VF32MF2>;
+  def : VTypeInfoToWide<VI8MF4, VF32M1>;
+  def : VTypeInfoToWide<VI8MF2, VF32M2>;
+  def : VTypeInfoToWide<VI8M1, VF32M4>;
+  def : VTypeInfoToWide<VI8M2, VF32M8>;
+}
+
+class QVRClass<LMULInfo m> {
+  LMULInfo c = !cond(!eq(m, V_MF8): V_MF2,
+                     !eq(m, V_MF4): V_M1,
+                     !eq(m, V_MF2): V_M2,
+                     !eq(m, V_M1): V_M4,
+                     !eq(m, V_M2): V_M8);
+}
+
+multiclass VPseudoVWCVTD_V_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListW in {
+    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=8,
+                                TargetConstraintType=3>;
+  }
+}
+
+multiclass VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListW in {
+    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
+                                            constraint, sew=8,
+                                            TargetConstraintType=2>;
+  }
+}
+
+multiclass VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListQ in {
+    defm _Q : VPseudoConversionRoundingMode<m.vrclass, QVRClass<m>.c.vrclass, m,
+                                            constraint, sew=8,
+                                            TargetConstraintType=2>;
+  }
+}
+
+let mayRaiseFPException = true, Predicates = [HasStdExtZvfofp8min] in {
+  defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
+  let AltFmtType = IS_ALTFMT in
+    defm PseudoVFWCVTBF16_F_F_ALT : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
+
+  defm PseudoVFNCVTBF16_F_F :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVTBF16_SAT_F_F : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVT_F_F :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVT_SAT_F_F :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  let AltFmtType = IS_ALTFMT in {
+    defm PseudoVFNCVTBF16_F_F_ALT :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVTBF16_SAT_F_F_ALT : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVT_F_F_ALT :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVT_SAT_F_F_ALT :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Patterns
+//===----------------------------------------------------------------------===//
+multiclass VPatConversionQF_RM<string intrinsic, string instruction,
+                               bit isSEWAware = 0> {
+  foreach fvtiToFWti = AllWidenableInt8ToFloat32Vectors in {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar fwti = fvtiToFWti.Wti;
+    let Predicates = [HasStdExtZvfofp8min] in
+    defm : VPatConversionRoundingMode<intrinsic, instruction, "Q",
+                                      fvti.Vector, fwti.Vector, fvti.Mask, fvti.Log2SEW,
+                                      fvti.LMul, fvti.RegClass, fwti.RegClass,
+                                      isSEWAware>;
+  }
+}
+
+let Predicates = [HasStdExtZvfofp8min] in {
+  // OFP8 to BF16 conversion instructions
+  defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v",
+                             "PseudoVFWCVTBF16_F_F",
+                             wlist=AllWidenableIntToBFloatVectors,
+                             isSEWAware=1>;
+  defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v_alt",
+                             "PseudoVFWCVTBF16_F_F_ALT",
+                             wlist=AllWidenableIntToBFloatVectors,
+                             isSEWAware=1>;
+  // BF16 to OFP8 conversion instructions
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", 
+                                "PseudoVFNCVTBF16_F_F",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_sat_f_f_w", 
+                                "PseudoVFNCVTBF16_SAT_F_F",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w_alt", 
+                                "PseudoVFNCVTBF16_F_F_ALT",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_sat_f_f_w_alt", 
+                                "PseudoVFNCVTBF16_SAT_F_F_ALT",
+                                 wlist=AllWidenableIntToBFloatVectors,
+                                 isSEWAware=1>;
+  // FP32 to OFP8 conversion instructions
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_f_f_q",
+                             "PseudoVFNCVT_F_F", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_sat_f_f_q",
+                             "PseudoVFNCVT_SAT_F_F", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_f_f_q_alt",
+                             "PseudoVFNCVT_F_F_ALT", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_sat_f_f_q_alt",
+                             "PseudoVFNCVT_SAT_F_F_ALT", isSEWAware=1>;
+}
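
The QVRClass mapping above encodes that these are quad-width narrowing
conversions: at a given VL the FP32 source occupies four times the register
group of the i8 destination (SEW 32 vs. SEW 8), so each destination LMUL pairs
with its 4x source class:

  destination (SEW=8):  MF8  MF4  MF2  M1  M2
  source      (SEW=32): MF2  M1   M2   M4  M8

The vfncvt-f-f.ll tests below show the same pairing, e.g. an nxv1i8 result
(mf8 in the vsetvli) narrowed from an nxv1f32 source.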
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
new file mode 100644
index 0000000000000..feafd4ba2d01a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 9f74f5570e434..88f2f1d741121 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
@@ -331,3 +331,355 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
new file mode 100644
index 0000000000000..93b892be37904
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
new file mode 100644
index 0000000000000..e81dd721b63be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
new file mode 100644
index 0000000000000..6569c07125e92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
@@ -0,0 +1,227 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv2bf16.nxv2i8(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv4bf16.nxv4i8(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index b51c8efca9f7c..53cdaa9753975 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -1,13 +1,205 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1.nxv1i8(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2.nxv2i8(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2bf16.nxv2i8(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4.nxv4i8(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4bf16.nxv4i8(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8.nxv8i8(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16.nxv16i8(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32.nxv32i8(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
 define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry

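For reference, a minimal sketch (not from the patch itself) of how the new FP32-to-OFP8 narrowing intrinsic added above is invoked, mirroring the unmasked test pattern; the function name is illustrative only, and iXLen stands for i32/i64 exactly as in the RUN lines:

define <vscale x 1 x i8> @sketch_fp32_to_ofp8(<vscale x 1 x float> %src, iXLen %vl) nounwind {
entry:
  ; Operand order follows the tests: passthru, source, rounding-mode
  ; operand (7, the value used throughout these tests), vector length.
  %r = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
    <vscale x 1 x i8> undef,
    <vscale x 1 x float> %src,
    iXLen 7, iXLen %vl)
  ret <vscale x 1 x i8> %r
}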
>From 7ab793b6f796c72f997f64abd559e36b3ae78e78 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Tue, 16 Dec 2025 18:26:51 -0800
Subject: [PATCH 2/5] fixup! replace undef

---
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll     | 22 +++++++++----------
 .../CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll   | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll | 12 +++++-----
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll     | 12 +++++-----
 6 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
index feafd4ba2d01a..e90b7de3eacf2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 88f2f1d741121..acc38f7b01a48 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -341,7 +341,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -373,7 +373,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -405,7 +405,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -437,7 +437,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -469,7 +469,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16(<vscale x 16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -501,7 +501,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16(<vscale x 32
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -533,7 +533,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -565,7 +565,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -597,7 +597,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -629,7 +629,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -661,7 +661,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32(<vscale x 16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
index 93b892be37904..92c36bc1d46d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32(<vsca
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
index e81dd721b63be..4c3edc9005ecb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
index 6569c07125e92..2f789f5f85ebc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
@@ -18,7 +18,7 @@ define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
-    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> poison,
     <vscale x 1 x i8> %0,
     iXLen %1)
 
@@ -55,7 +55,7 @@ define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
-    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> poison,
     <vscale x 2 x i8> %0,
     iXLen %1)
 
@@ -92,7 +92,7 @@ define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
-    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> poison,
     <vscale x 4 x i8> %0,
     iXLen %1)
 
@@ -129,7 +129,7 @@ define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
-    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> poison,
     <vscale x 8 x i8> %0,
     iXLen %1)
 
@@ -166,7 +166,7 @@ define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
-    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> poison,
     <vscale x 16 x i8> %0,
     iXLen %1)
 
@@ -203,7 +203,7 @@ define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
-    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> poison,
     <vscale x 32 x i8> %0,
     iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index 53cdaa9753975..7f43ad2e3ad94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1.nxv1i8(
-    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> poison,
     <vscale x 1 x i8> %0,
     iXLen %1)
 
@@ -49,7 +49,7 @@ define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2.nxv2i8(
-    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> poison,
     <vscale x 2 x i8> %0,
     iXLen %1)
 
@@ -81,7 +81,7 @@ define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4.nxv4i8(
-    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> poison,
     <vscale x 4 x i8> %0,
     iXLen %1)
 
@@ -113,7 +113,7 @@ define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8.nxv8i8(
-    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> poison,
     <vscale x 8 x i8> %0,
     iXLen %1)
 
@@ -145,7 +145,7 @@ define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16.nxv16i8(
-    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> poison,
     <vscale x 16 x i8> %0,
     iXLen %1)
 
@@ -177,7 +177,7 @@ define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32.nxv32i8(
-    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> poison,
     <vscale x 32 x i8> %0,
     iXLen %1)
 

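Note the sweep above only touches the unmasked forms: the masked intrinsics carry a genuine merge operand, so their passthru stays a real value. A sketch of a masked call assembled from the test signatures (not part of the patch; the function name is illustrative only):

define <vscale x 1 x i8> @sketch_masked_fp32_to_ofp8(<vscale x 1 x i8> %passthru, <vscale x 1 x float> %src, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
entry:
  ; Same operand order as the masked tests: merge value, source, mask,
  ; rounding-mode operand (7), vector length, then the policy operand
  ; (1, which the CHECK lines above show selecting a "ta, mu" vsetvli).
  %r = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32(
    <vscale x 1 x i8> %passthru,
    <vscale x 1 x float> %src,
    <vscale x 1 x i1> %mask,
    iXLen 7, iXLen %vl, iXLen 1)
  ret <vscale x 1 x i8> %r
}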
>From a1333e0157d51206e0893622573b7cfd44a83c91 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Thu, 18 Dec 2025 23:40:27 -0800
Subject: [PATCH 3/5] fixup! add IS_NOT_ALTFMT, remove fp exception

---
 .../Target/RISCV/RISCVInstrInfoZvfofp8min.td  | 28 +++++++++++--------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
index b067488ea662f..3f6b16dc6f70f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
@@ -81,20 +81,24 @@ multiclass VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min {
   }
 }
 
-let mayRaiseFPException = true, Predicates = [HasStdExtZvfofp8min] in {
-  defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
+let Predicates = [HasStdExtZvfofp8min] in {
+  let AltFmtType = IS_NOT_ALTFMT in
+    defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
   let AltFmtType = IS_ALTFMT in
     defm PseudoVFWCVTBF16_F_F_ALT : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
-
-  defm PseudoVFNCVTBF16_F_F :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
-  defm PseudoVFNCVTBF16_SAT_F_F : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
-  defm PseudoVFNCVT_F_F :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
-  defm PseudoVFNCVT_SAT_F_F :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
-  let AltFmtType = IS_ALTFMT in {
-    defm PseudoVFNCVTBF16_F_F_ALT :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
-    defm PseudoVFNCVTBF16_SAT_F_F_ALT : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
-    defm PseudoVFNCVT_F_F_ALT :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
-    defm PseudoVFNCVT_SAT_F_F_ALT :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  let mayRaiseFPException = true in {
+    let AltFmtType = IS_NOT_ALTFMT in {
+      defm PseudoVFNCVTBF16_F_F :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVTBF16_SAT_F_F : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVT_F_F :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVT_SAT_F_F :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+    }
+    let AltFmtType = IS_ALTFMT in {
+      defm PseudoVFNCVTBF16_F_F_ALT :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVTBF16_SAT_F_F_ALT : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVT_F_F_ALT :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+      defm PseudoVFNCVT_SAT_F_F_ALT :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+    }
   }
 }
 

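The regrouping above keeps mayRaiseFPException on the narrowing conversions only; the OFP8-to-BF16 widening pseudos sit outside that block. The same split shows up in the intrinsic signatures: widening calls take no rounding-mode operand, narrowing calls do. A small round-trip sketch under those signatures (not from the patch; the function name is illustrative only):

define <vscale x 1 x i8> @sketch_alt_round_trip(<vscale x 1 x i8> %src, iXLen %vl) nounwind {
entry:
  ; Widening OFP8 -> BF16: passthru, source, vector length -- no
  ; rounding-mode operand.
  %w = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8(
    <vscale x 1 x bfloat> poison,
    <vscale x 1 x i8> %src,
    iXLen %vl)
  ; Narrowing BF16 -> OFP8: the extra iXLen 7 is the rounding-mode
  ; operand, as in the tests.
  %n = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
    <vscale x 1 x i8> poison,
    <vscale x 1 x bfloat> %w,
    iXLen 7, iXLen %vl)
  ret <vscale x 1 x i8> %n
}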
>From c2034f3b0412c3a1974db0e24b86b94677f7dd0c Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Mon, 22 Dec 2025 21:42:52 -0800
Subject: [PATCH 4/5] fixup! alt interleave tests

---
 .../RISCV/rvv/zvfofp8min-alt-interleave.ll    | 243 ++++++++++++++++++
 1 file changed, 243 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interleave.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interleave.ll
new file mode 100644
index 0000000000000..3c95377d48f8f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interleave.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+; a -> needs alt
+; n -> does not need alt
+; d -> does not care about alt
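+;
+; For example, test_a_n_interleave below chains an alt-format narrowing
+; convert ("a") into a regular widening convert ("n"), so codegen must
+; toggle vtype from e8alt back to e8 with a second vsetvli in between.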
+
+define <vscale x 1 x bfloat> @test_a_n_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_a_n_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x i8> @test_a_n_a_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_a_n_a_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %b,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @test_a_n_d_interleave(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_a_n_d_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %a,
+    iXLen 7, iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %b,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @test_a_n_a_d_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_a_n_a_d_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vadd.vv v8, v9, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %b,
+    iXLen 7, iXLen %1)
+
+  %d = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %c,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %d
+}
+
+define <vscale x 1 x bfloat> @test_n_a_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_n_a_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %b
+}
+
+define <vscale x 1 x i8> @test_n_a_n_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_n_a_n_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %b,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @test_n_a_d_interleave(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_n_a_d_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  %b = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %a,
+    iXLen 7, iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %b,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %c
+}
+
+define <vscale x 1 x i8> @test_n_a_n_d_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: test_n_a_n_d_interleave:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vadd.vv v8, v9, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  %b = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x i8> %a,
+    iXLen %1)
+
+  %c = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x bfloat> %b,
+    iXLen 7, iXLen %1)
+
+  %d = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8(
+    <vscale x 1 x i8> poison,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %c,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %d
+}

>From 1c81fadfa8e5894cb75d3cd652dc03d2fe65f2b4 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Tue, 23 Dec 2025 18:06:04 -0800
Subject: [PATCH 5/5] fixup! wording

---
 llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interlave.ll | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interlave.ll b/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interlave.ll
index 3c95377d48f8f..8b164d4ec273b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interlave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zvfofp8min-alt-interlave.ll
@@ -5,8 +5,8 @@
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 ; a -> need alt
-; n -> dont need alt
-; d -> dont care alt
+; n -> don't need alt
+; d -> don't care about alt
 
 define <vscale x 1 x bfloat> @test_a_n_interleave(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: test_a_n_interleave:
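
For readers following the a/n/d legend in the hunk above, here is a minimal
sketch of the pattern these interleave tests pin down (the intrinsic names and
the iXLen 7 dynamic-rounding-mode operand are taken from the tests earlier in
this series; the function name, value names, and the iXLen placeholder are
illustrative only):

  define <vscale x 1 x bfloat> @sketch_a_then_n(<vscale x 1 x bfloat> %x, iXLen %vl) nounwind {
    ; "a": bf16 -> OFP8 narrowing via the alt intrinsic, selected under e8alt
    %f8 = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
        <vscale x 1 x i8> poison, <vscale x 1 x bfloat> %x, iXLen 7, iXLen %vl)
    ; "n": OFP8 -> bf16 widening via the plain intrinsic, selected under e8
    %bf = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1bf16.nxv1i8(
        <vscale x 1 x bfloat> poison, <vscale x 1 x i8> %f8, iXLen %vl)
    ret <vscale x 1 x bfloat> %bf
  }

As the CHECK lines in the tests above show, the second conversion needs its own
vsetvli: e8 and e8alt are distinct vtype encodings even though SEW stays 8, so
the vtype set for the alt conversion cannot be reused by the plain one.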


