[clang] [llvm] zvfofp8min suffix types (PR #172626)

Brandon Wu via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 17 02:13:09 PST 2025


https://github.com/4vtomat created https://github.com/llvm/llvm-project/pull/172626

- [llvm][RISCV] Support Zvfofp8min llvm intrinsics and codegen
- fixup! replace undef
- [RISCV][clang] Support f8e4m3 and f8e5m2 suffix type for intrinsics


From 39cb90eafff71043e533293f338a1a44a191a3e6 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Mon, 15 Dec 2025 01:54:20 -0800
Subject: [PATCH 1/3] [llvm][RISCV] Support Zvfofp8min llvm intrinsics and
 codegen

This is a follow-up patch to https://github.com/llvm/llvm-project/pull/157014
that adds support for the LLVM intrinsics and codegen.
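
For reference, a minimal IR-level sketch of one of the new intrinsics (the
function name is illustrative; the signature is taken from the tests below,
with iXLen instantiated as i64 for riscv64, rounding-mode operand 7 selecting
the dynamic frm, and the trailing operand carrying the AVL):

  define <vscale x 1 x i8> @ofp8_narrow_f32(<vscale x 1 x float> %src, i64 %vl) {
    %r = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
        <vscale x 1 x i8> undef,
        <vscale x 1 x float> %src,
        i64 7, i64 %vl)
    ret <vscale x 1 x i8> %r
  }

With the llc invocation used in the tests (-mattr=+v,+zvfbfmin,
+experimental-zvfofp8min) this selects vfncvt.f.f.q.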
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  20 +
 .../RISCV/MCTargetDesc/RISCVInstPrinter.cpp   |   4 +-
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |   4 +-
 .../Target/RISCV/RISCVInstrInfoZvfofp8min.td  | 127 ++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll     | 360 +++++++++++++++++-
 .../CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll   | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll | 357 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll | 227 +++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll     | 200 +++++++++-
 10 files changed, 2003 insertions(+), 10 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 77fcc46ea5a89..9088e5e6a357b 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1958,6 +1958,26 @@ let TargetPrefix = "riscv" in {
 let TargetPrefix = "riscv" in
 def int_riscv_pause : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
 
+
+//===----------------------------------------------------------------------===//
+// Zvfofp8min - OFP8 conversion extension
+// The Zvfofp8min extension provides basic support for the two 8-bit
+// floating-point formats defined in the Open Compute Project OFP8
+// specification, OFP8 E4M3 and OFP8 E5M2.
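+// E4M3 uses 1 sign, 4 exponent and 3 mantissa bits; E5M2 uses 1 sign,
+// 5 exponent and 2 mantissa bits, trading precision for dynamic range.
+//===----------------------------------------------------------------------===//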
+let TargetPrefix = "riscv" in {
+  // OFP8 to BF16 conversion instructions
+  defm vfwcvt_f_f_v_alt : RISCVConversion;
+  // BF16 to OFP8 conversion instructions
+  defm vfncvt_sat_f_f_w : RISCVConversionRoundingMode;
+  defm vfncvt_f_f_w_alt : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_w_alt : RISCVConversionRoundingMode;
+  // FP32 to OFP8 conversion instructions
+  defm vfncvt_f_f_q : RISCVConversionRoundingMode;
+  defm vfncvt_f_f_q_alt : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_q : RISCVConversionRoundingMode;
+  defm vfncvt_sat_f_f_q_alt : RISCVConversionRoundingMode;
+} // TargetPrefix = "riscv"
+
 // Vendor extensions
 //===----------------------------------------------------------------------===//
 include "llvm/IR/IntrinsicsRISCVXTHead.td"
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index 7b9c4b3e800cd..f2c5f6947aa00 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -216,11 +216,13 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo,
                                    const MCSubtargetInfo &STI, raw_ostream &O) {
   unsigned Imm = MI->getOperand(OpNo).getImm();
   // Print the raw immediate for reserved values: vlmul[2:0]=4, vsew[2:0]=0b1xx,
-  // altfmt=1 without zvfbfa extension, or non-zero in bits 9 and above.
+  // altfmt=1 without the zvfbfa, zvfofp8min, or XSfvfbfexp16e extensions, or
+  // non-zero in bits 9 and above.
   if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED ||
       RISCVVType::getSEW(Imm) > 64 ||
       (RISCVVType::isAltFmt(Imm) &&
        !(STI.hasFeature(RISCV::FeatureStdExtZvfbfa) ||
+         STI.hasFeature(RISCV::FeatureStdExtZvfofp8min) ||
          STI.hasFeature(RISCV::FeatureVendorXSfvfbfexp16e))) ||
       (Imm >> 9) != 0) {
     O << formatImm(Imm);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index e07d7b5ee5563..a32f6a566493f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5851,8 +5851,9 @@ multiclass VPatConversionWF_VI<string intrinsic, string instruction,
 }
 
 multiclass VPatConversionWF_VF<string intrinsic, string instruction,
+                               list<VTypeInfoToWide> wlist = AllWidenableFloatVectors,
                                bit isSEWAware = 0> {
-  foreach fvtiToFWti = AllWidenableFloatVectors in {
+  foreach fvtiToFWti = wlist in {
     defvar fvti = fvtiToFWti.Vti;
     defvar fwti = fvtiToFWti.Wti;
     // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled.
@@ -7177,6 +7178,7 @@ defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU",
 defm : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X",
                            isSEWAware=1>;
 defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F",
+                           wlist=AllWidenableFloatVectors,
                            isSEWAware=1>;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
index 86cab697cbf55..b067488ea662f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfofp8min.td
@@ -24,3 +24,130 @@ let Predicates = [HasStdExtZvfofp8min], Constraints = "@earlyclobber $vd",
   defm VFNCVT_F_F_Q : VNCVTF_FV_VS2<"vfncvt.f.f.q", 0b010010, 0b11001>;
   defm VFNCVT_SAT_F_F_Q : VNCVTF_FV_VS2<"vfncvt.sat.f.f.q", 0b010010, 0b11011>;
 }
+
+//===----------------------------------------------------------------------===//
+// Pseudo instructions
+//===----------------------------------------------------------------------===//
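+// LMULs of the SEW=8 result for which a quad-width (4 x LMUL) source register
+// class exists; V_M4 and V_M8 are omitted since four times their LMUL would
+// exceed LMUL=8.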
+defvar MxListQ = [V_MF8, V_MF4, V_MF2, V_M1, V_M2];
+
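+// Pairs each SEW=8 vector type (holding OFP8 values) with the BF16 vector
+// type of twice its LMUL, for the single-width widening/narrowing patterns.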
+defset list<VTypeInfoToWide> AllWidenableIntToBFloatVectors = {
+  def : VTypeInfoToWide<VI8MF8, VBF16MF4>;
+  def : VTypeInfoToWide<VI8MF4, VBF16MF2>;
+  def : VTypeInfoToWide<VI8MF2, VBF16M1>;
+  def : VTypeInfoToWide<VI8M1, VBF16M2>;
+  def : VTypeInfoToWide<VI8M2, VBF16M4>;
+  def : VTypeInfoToWide<VI8M4, VBF16M8>;
+}
+
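+// Pairs each SEW=8 vector type with the FP32 vector type of four times its
+// LMUL, for the quad-width narrowing (vfncvt*.q) patterns.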
+defset list<VTypeInfoToWide> AllWidenableInt8ToFloat32Vectors = {
+  def : VTypeInfoToWide<VI8MF8, VF32MF2>;
+  def : VTypeInfoToWide<VI8MF4, VF32M1>;
+  def : VTypeInfoToWide<VI8MF2, VF32M2>;
+  def : VTypeInfoToWide<VI8M1, VF32M4>;
+  def : VTypeInfoToWide<VI8M2, VF32M8>;
+}
+
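+// Returns the LMUL four times as large as m, i.e. the register class of the
+// quad-width SEW=32 source operand for a SEW=8 result of LMUL m.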
+class QVRClass<LMULInfo m> {
+  LMULInfo c = !cond(!eq(m, V_MF8): V_MF2,
+                     !eq(m, V_MF4): V_M1,
+                     !eq(m, V_MF2): V_M2,
+                     !eq(m, V_M1): V_M4,
+                     !eq(m, V_M2): V_M8);
+}
+
+multiclass VPseudoVWCVTD_V_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListW in {
+    defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint, sew=8,
+                                TargetConstraintType=3>;
+  }
+}
+
+multiclass VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListW in {
+    defm _W : VPseudoConversionRoundingMode<m.vrclass, m.wvrclass, m,
+                                            constraint, sew=8,
+                                            TargetConstraintType=2>;
+  }
+}
+
+multiclass VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxListQ in {
+    defm _Q : VPseudoConversionRoundingMode<m.vrclass, QVRClass<m>.c.vrclass, m,
+                                            constraint, sew=8,
+                                            TargetConstraintType=2>;
+  }
+}
+
+let mayRaiseFPException = true, Predicates = [HasStdExtZvfofp8min] in {
+  defm PseudoVFWCVTBF16_F_F : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
+  let AltFmtType = IS_ALTFMT in
+    defm PseudoVFWCVTBF16_F_F_ALT : VPseudoVWCVTD_V_NoSched_Zvfofp8min;
+
+  defm PseudoVFNCVTBF16_F_F :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVTBF16_SAT_F_F : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVT_F_F :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  defm PseudoVFNCVT_SAT_F_F :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  let AltFmtType = IS_ALTFMT in {
+    defm PseudoVFNCVTBF16_F_F_ALT :     VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVTBF16_SAT_F_F_ALT : VPseudoVNCVTD_W_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVT_F_F_ALT :         VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+    defm PseudoVFNCVT_SAT_F_F_ALT :     VPseudoVNCVTD_Q_RM_NoSched_Zvfofp8min;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Patterns
+//===----------------------------------------------------------------------===//
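+// Matches the quad-width narrowing intrinsics: an FP32 source vector (Wti) is
+// narrowed to a SEW=8 OFP8 result (Vti) via the Q-suffixed pseudos.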
+multiclass VPatConversionQF_RM<string intrinsic, string instruction,
+                               bit isSEWAware = 0> {
+  foreach fvtiToFWti = AllWidenableInt8ToFloat32Vectors in {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar fwti = fvtiToFWti.Wti;
+    let Predicates = [HasStdExtZvfofp8min] in
+    defm : VPatConversionRoundingMode<intrinsic, instruction, "Q",
+                                      fvti.Vector, fwti.Vector, fvti.Mask,
+                                      fvti.Log2SEW, fvti.LMul, fvti.RegClass,
+                                      fwti.RegClass, isSEWAware>;
+  }
+}
+
+let Predicates = [HasStdExtZvfofp8min] in {
+  // OFP8 to BF16 conversion instructions
+  defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v",
+                             "PseudoVFWCVTBF16_F_F",
+                             wlist=AllWidenableIntToBFloatVectors,
+                             isSEWAware=1>;
+  defm : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v_alt",
+                             "PseudoVFWCVTBF16_F_F_ALT",
+                             wlist=AllWidenableIntToBFloatVectors,
+                             isSEWAware=1>;
+  // BF16 to OFP8 conversion instructions
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", 
+                                "PseudoVFNCVTBF16_F_F",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_sat_f_f_w", 
+                                "PseudoVFNCVTBF16_SAT_F_F",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w_alt", 
+                                "PseudoVFNCVTBF16_F_F_ALT",
+                                wlist=AllWidenableIntToBFloatVectors,
+                                isSEWAware=1>;
+  defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_sat_f_f_w_alt", 
+                                "PseudoVFNCVTBF16_SAT_F_F_ALT",
+                                 wlist=AllWidenableIntToBFloatVectors,
+                                 isSEWAware=1>;
+  // FP32 to OFP8 conversion instructions
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_f_f_q",
+                             "PseudoVFNCVT_F_F", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_sat_f_f_q",
+                             "PseudoVFNCVT_SAT_F_F", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_f_f_q_alt",
+                             "PseudoVFNCVT_F_F_ALT", isSEWAware=1>;
+  defm : VPatConversionQF_RM<"int_riscv_vfncvt_sat_f_f_q_alt",
+                             "PseudoVFNCVT_SAT_F_F_ALT", isSEWAware=1>;
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
new file mode 100644
index 0000000000000..feafd4ba2d01a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 9f74f5570e434..88f2f1d741121 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
@@ -331,3 +331,355 @@ entry:
 
   ret <vscale x 8 x float> %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvt.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
new file mode 100644
index 0000000000000..93b892be37904
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w.alt_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q.alt_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
new file mode 100644
index 0000000000000..e81dd721b63be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
@@ -0,0 +1,357 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x i8> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv1i8.nxv1bf16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x i8> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv2i8.nxv2bf16(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x i8> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv4i8.nxv4bf16(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vfncvt.sat_mask_f.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfncvtbf16.sat.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x i8> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv1i8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv1i8.nxv1f32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x i8> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv2i8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv2i8.nxv2f32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v10, v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x i8> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv4i8_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv4i8.nxv4f32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> undef,
+    <vscale x 8 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x i8> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv8i8_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv8i8.nxv8f32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vfncvt.sat.f.f.q v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> undef,
+    <vscale x 16 x float> %0,
+    iXLen 7, iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vfncvt.sat_mask_f.f.q_nxv16i8_nxv16f32(<vscale x 16 x i8> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt.sat_mask_f.f.q_nxv16i8_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfncvt.sat.f.f.q v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.mask.nxv16i8.nxv16f32(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    iXLen 7, iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
new file mode 100644
index 0000000000000..6569c07125e92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
@@ -0,0 +1,227 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+experimental-zvfofp8min \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf8, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv2bf16.nxv2i8(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, mf2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv4bf16.nxv4i8(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i8>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv8bf16.nxv8i8(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i8>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv16bf16.nxv16i8(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i8>,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.f.v.alt_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v.alt_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.mask.nxv32bf16.nxv32i8(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index b51c8efca9f7c..53cdaa9753975 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -1,13 +1,205 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin,+experimental-zvfofp8min \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1.nxv1i8(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1bf16.nxv1i8(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2.nxv2i8(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2bf16.nxv2i8(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4.nxv4i8(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4bf16.nxv4i8(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8.nxv8i8(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8bf16.nxv8i8(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16.nxv16i8(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16bf16.nxv16i8(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32.nxv32i8(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vfwcvt_mask_f.f.v_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.mask.nxv32bf16.nxv32i8(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
 define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry

>From 7ab793b6f796c72f997f64abd559e36b3ae78e78 Mon Sep 17 00:00:00 2001
From: Brandon Wu <brandon.wu at sifive.com>
Date: Tue, 16 Dec 2025 18:26:51 -0800
Subject: [PATCH 2/3] fixup! replace undef

---
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll     | 22 +++++++++----------
 .../CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll   | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll | 22 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll | 12 +++++-----
 llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll     | 12 +++++-----
 6 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
index feafd4ba2d01a..e90b7de3eacf2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-alt.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv1i8_nxv1bf16(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv2i8_nxv2bf16(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv4i8_nxv4bf16(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv8i8_nxv8bf16(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv16i8_nxv16bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w.alt_nxv32i8_nxv32bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.alt.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv1i8_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv2i8_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv4i8_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv8i8_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q.alt_nxv16i8_nxv16f32(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.alt.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
index 88f2f1d741121..acc38f7b01a48 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -341,7 +341,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.w_nxv1i8_nxv1bf16(<vscale x 1 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.w.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -373,7 +373,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.w_nxv2i8_nxv2bf16(<vscale x 2 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.w.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -405,7 +405,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.w_nxv4i8_nxv4bf16(<vscale x 4 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.w.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -437,7 +437,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.w_nxv8i8_nxv8bf16(<vscale x 8 x b
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.w.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -469,7 +469,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.w_nxv16i8_nxv16bf16(<vscale x 16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.w.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -501,7 +501,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt_f.f.w_nxv32i8_nxv32bf16(<vscale x 32
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.f.f.w.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -533,7 +533,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_f.f.q_nxv1i8_nxv1f32(<vscale x 1 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.f.f.q.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -565,7 +565,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt_f.f.q_nxv2i8_nxv2f32(<vscale x 2 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.f.f.q.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -597,7 +597,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt_f.f.q_nxv4i8_nxv4f32(<vscale x 4 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.f.f.q.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -629,7 +629,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt_f.f.q_nxv8i8_nxv8f32(<vscale x 8 x fl
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.f.f.q.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -661,7 +661,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt_f.f.q_nxv16i8_nxv16f32(<vscale x 16
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.f.f.q.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
index 93b892be37904..92c36bc1d46d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f-alt.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv1i8_nxv1bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv2i8_nxv2bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv4i8_nxv4bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv8i8_nxv8bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv16i8_nxv16bf16(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w.alt_nxv32i8_nxv32bf16(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.alt.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv1i8_nxv1f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv2i8_nxv2f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv4i8_nxv4f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv8i8_nxv8f32(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q.alt_nxv16i8_nxv16f32(<vsca
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.alt.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
index e81dd721b63be..4c3edc9005ecb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-sat-f-f.ll
@@ -13,7 +13,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv1i8_nxv1bf16(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv1i8.nxv1bf16(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -45,7 +45,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv2i8_nxv2bf16(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv2i8.nxv2bf16(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv4i8_nxv4bf16(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv4i8.nxv4bf16(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -109,7 +109,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv8i8_nxv8bf16(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv8i8.nxv8bf16(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -141,7 +141,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv16i8_nxv16bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv16i8.nxv16bf16(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -173,7 +173,7 @@ define <vscale x 32 x i8> @intrinsic_vfncvt.sat_f.f.w_nxv32i8_nxv32bf16(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.sat.f.f.w.nxv32i8.nxv32bf16(
-    <vscale x 32 x i8> undef,
+    <vscale x 32 x i8> poison,
     <vscale x 32 x bfloat> %0,
     iXLen 7, iXLen %1)
 
@@ -205,7 +205,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv1i8_nxv1f32(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv1i8.nxv1f32(
-    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> poison,
     <vscale x 1 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -237,7 +237,7 @@ define <vscale x 2 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv2i8_nxv2f32(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv2i8.nxv2f32(
-    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> poison,
     <vscale x 2 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -269,7 +269,7 @@ define <vscale x 4 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv4i8_nxv4f32(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv4i8.nxv4f32(
-    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> poison,
     <vscale x 4 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -301,7 +301,7 @@ define <vscale x 8 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv8i8_nxv8f32(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv8i8.nxv8f32(
-    <vscale x 8 x i8> undef,
+    <vscale x 8 x i8> poison,
     <vscale x 8 x float> %0,
     iXLen 7, iXLen %1)
 
@@ -333,7 +333,7 @@ define <vscale x 16 x i8> @intrinsic_vfncvt.sat_f.f.q_nxv16i8_nxv16f32(<vscale x
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.sat.f.f.q.nxv16i8.nxv16f32(
-    <vscale x 16 x i8> undef,
+    <vscale x 16 x i8> poison,
     <vscale x 16 x float> %0,
     iXLen 7, iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
index 6569c07125e92..2f789f5f85ebc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f-alt.ll
@@ -18,7 +18,7 @@ define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv1bf16_nxv1i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv1.nxv1i8(
-    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> poison,
     <vscale x 1 x i8> %0,
     iXLen %1)
 
@@ -55,7 +55,7 @@ define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv2bf16_nxv2i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv2.nxv2i8(
-    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> poison,
     <vscale x 2 x i8> %0,
     iXLen %1)
 
@@ -92,7 +92,7 @@ define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv4bf16_nxv4i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv4.nxv4i8(
-    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> poison,
     <vscale x 4 x i8> %0,
     iXLen %1)
 
@@ -129,7 +129,7 @@ define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv8bf16_nxv8i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv8.nxv8i8(
-    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> poison,
     <vscale x 8 x i8> %0,
     iXLen %1)
 
@@ -166,7 +166,7 @@ define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv16bf16_nxv16i8(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv16.nxv16i8(
-    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> poison,
     <vscale x 16 x i8> %0,
     iXLen %1)
 
@@ -203,7 +203,7 @@ define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v.alt_nxv32bf16_nxv32i8(<vsc
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.alt.nxv32.nxv32i8(
-    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> poison,
     <vscale x 32 x i8> %0,
     iXLen %1)
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
index 53cdaa9753975..7f43ad2e3ad94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -17,7 +17,7 @@ define <vscale x 1 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv1bf16_nxv1i8(<vscale x 1
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv1.nxv1i8(
-    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> poison,
     <vscale x 1 x i8> %0,
     iXLen %1)
 
@@ -49,7 +49,7 @@ define <vscale x 2 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv2bf16_nxv2i8(<vscale x 2
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv2.nxv2i8(
-    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> poison,
     <vscale x 2 x i8> %0,
     iXLen %1)
 
@@ -81,7 +81,7 @@ define <vscale x 4 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv4bf16_nxv4i8(<vscale x 4
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv4.nxv4i8(
-    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> poison,
     <vscale x 4 x i8> %0,
     iXLen %1)
 
@@ -113,7 +113,7 @@ define <vscale x 8 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv8bf16_nxv8i8(<vscale x 8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv8.nxv8i8(
-    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> poison,
     <vscale x 8 x i8> %0,
     iXLen %1)
 
@@ -145,7 +145,7 @@ define <vscale x 16 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv16bf16_nxv16i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv16.nxv16i8(
-    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> poison,
     <vscale x 16 x i8> %0,
     iXLen %1)
 
@@ -177,7 +177,7 @@ define <vscale x 32 x bfloat> @intrinsic_vfwcvt_f.f.v_nxv32bf16_nxv32i8(<vscale
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.f.v.nxv32.nxv32i8(
-    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> poison,
     <vscale x 32 x i8> %0,
     iXLen %1)
 

>From 54bdef50490349a805621a299b648f15ef129e92 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Thu, 10 Jul 2025 21:00:28 -0700
Subject: [PATCH 3/3] [RISCV][clang] Support f8e4m3 and f8e5m2 suffix type for
 intrinsics

This commit supports the OFP8 type suffixes for C intrinsics in
TableGen. It also adds an AltFmt flag that appends an _alt suffix to
the builtin name, so that codegen can identify intrinsics that require
vtype.altfmt = 1. Since we don't have f8e4m3 and f8e5m2 LLVM types
yet, this flag can be removed once they exist.
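
As a rough illustration (hypothetical code, not the actual clang
implementation), the naming rule the AltFmt bit implements boils down
to appending "_alt" to the builtin name before the usual masked
decoration; the real logic lives in
RVVIntrinsic::updateNamesAndPolicy, and the relative order of the
suffixes here is an assumption:

    #include <string>

    // Sketch of the builtin-name decoration described above.
    std::string decorateBuiltinName(std::string Name, bool AltFmt,
                                    bool IsMasked) {
      if (AltFmt)
        Name += "_alt"; // selects the vtype.altfmt = 1 encoding
      if (IsMasked)
        Name += "_m";
      return Name;
    }

    // e.g. decorateBuiltinName("vfncvt_f_f_w", /*AltFmt=*/true,
    //                          /*IsMasked=*/false) yields
    // "vfncvt_f_f_w_alt", matching the _alt IR intrinsics above.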
---
 .../clang/Basic/riscv_vector_common.td        |  3 +
 .../clang/Support/RISCVVIntrinsicUtils.h      | 19 +++--
 clang/lib/Sema/SemaRISCV.cpp                  |  6 +-
 clang/lib/Support/RISCVVIntrinsicUtils.cpp    | 72 ++++++++++++++-----
 clang/utils/TableGen/RISCVVEmitter.cpp        | 17 +++--
 5 files changed, 90 insertions(+), 27 deletions(-)

diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 7e2d339291713..5cf45fcc845da 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -181,6 +181,9 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
   // This builtin has a masked form.
   bit HasMasked = true;
 
+  // This is used by intrinsics that need vtype.altfmt
+  bit AltFmt = false;
+
   // If HasMasked, this flag states that this builtin has a maskedoff operand. It
   // is always the first operand in builtin and IR intrinsic.
   bit HasMaskedOffOperand = true;
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 67149f23116e0..4016cc2f77dec 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -202,7 +202,7 @@ llvm::SmallVector<PrototypeDescriptor>
 parsePrototypes(llvm::StringRef Prototypes);
 
 // Basic type of vector type.
-enum class BasicType : uint8_t {
+enum class BasicType : uint16_t {
   Unknown = 0,
   Int8 = 1 << 0,
   Int16 = 1 << 1,
@@ -212,8 +212,10 @@ enum class BasicType : uint8_t {
   Float16 = 1 << 5,
   Float32 = 1 << 6,
   Float64 = 1 << 7,
-  MaxOffset = 7,
-  LLVM_MARK_AS_BITMASK_ENUM(Float64),
+  F8E4M3 = 1 << 8,
+  F8E5M2 = 1 << 9,
+  MaxOffset = 9,
+  LLVM_MARK_AS_BITMASK_ENUM(F8E5M2),
 };
 
 // Type of vector type.
@@ -228,6 +230,8 @@ enum ScalarTypeKind : uint8_t {
   UnsignedInteger,
   Float,
   BFloat,
+  FloatE4M3,
+  FloatE5M2,
   Invalid,
   Undefined,
 };
@@ -412,7 +416,8 @@ class RVVIntrinsic {
                bool HasBuiltinAlias, llvm::StringRef ManualCodegen,
                const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes, unsigned NF,
-               Policy PolicyAttrs, bool HasFRMRoundModeOp, unsigned TWiden);
+               Policy PolicyAttrs, bool HasFRMRoundModeOp, unsigned TWiden,
+               bool AltFmt);
   ~RVVIntrinsic() = default;
 
   RVVTypePtr getOutputType() const { return OutputType; }
@@ -482,7 +487,8 @@ class RVVIntrinsic {
   static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
                                    std::string &Name, std::string &BuiltinName,
                                    std::string &OverloadedName,
-                                   Policy &PolicyAttrs, bool HasFRMRoundModeOp);
+                                   Policy &PolicyAttrs, bool HasFRMRoundModeOp,
+                                   bool AltFmt);
 };
 
 // Raw RVV intrinsic info, used to expand later.
@@ -517,7 +523,7 @@ struct RVVIntrinsicRecord {
   uint8_t OverloadedSuffixSize;
 
   // Supported type, mask of BasicType.
-  uint8_t TypeRangeMask;
+  uint16_t TypeRangeMask;
 
   // Supported LMUL.
   uint8_t Log2LMULMask;
@@ -531,6 +537,7 @@ struct RVVIntrinsicRecord {
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
   bool HasFRMRoundModeOp : 1;
+  bool AltFmt : 1;
   bool IsTuple : 1;
   LLVM_PREFERRED_TYPE(PolicyScheme)
   uint8_t UnMaskedPolicyScheme : 2;
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 6153948a6a589..32d79cab46bde 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -137,6 +137,10 @@ static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
   case ScalarTypeKind::UnsignedInteger:
     QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
     break;
+  case ScalarTypeKind::FloatE4M3:
+  case ScalarTypeKind::FloatE5M2:
+    QT = Context.getIntTypeForBitwidth(8, false);
+    break;
   case ScalarTypeKind::BFloat:
     QT = Context.BFloat16Ty;
     break;
@@ -379,7 +383,7 @@ void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
 
   RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
                                      OverloadedName, PolicyAttrs,
-                                     Record.HasFRMRoundModeOp);
+                                     Record.HasFRMRoundModeOp, Record.AltFmt);
 
   // Put into IntrinsicList.
   uint32_t Index = IntrinsicList.size();
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 12e209aec92ce..a5430aee6b746 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -202,6 +202,12 @@ void RVVType::initBuiltinStr() {
   case ScalarTypeKind::BFloat:
     BuiltinStr += "y";
     break;
+  case ScalarTypeKind::FloatE4M3:
+    BuiltinStr += "a";
+    break;
+  case ScalarTypeKind::FloatE5M2:
+    BuiltinStr += "b";
+    break;
   default:
     llvm_unreachable("ScalarType is invalid!");
   }
@@ -244,6 +250,8 @@ void RVVType::initClangBuiltinStr() {
     ClangBuiltinStr += "int";
     break;
   case ScalarTypeKind::UnsignedInteger:
+  case ScalarTypeKind::FloatE4M3:
+  case ScalarTypeKind::FloatE5M2:
     ClangBuiltinStr += "uint";
     break;
   default:
@@ -319,6 +327,8 @@ void RVVType::initTypeStr() {
     Str += getTypeString("int");
     break;
   case ScalarTypeKind::UnsignedInteger:
+  case ScalarTypeKind::FloatE4M3:
+  case ScalarTypeKind::FloatE5M2:
     Str += getTypeString("uint");
     break;
   default:
@@ -346,6 +356,12 @@ void RVVType::initShortStr() {
   case ScalarTypeKind::UnsignedInteger:
     ShortStr = "u" + utostr(ElementBitwidth);
     break;
+  case ScalarTypeKind::FloatE4M3:
+    ShortStr = "f8e4m3";
+    break;
+  case ScalarTypeKind::FloatE5M2:
+    ShortStr = "f8e5m2";
+    break;
   default:
     llvm_unreachable("Unhandled case!");
   }
@@ -395,6 +411,14 @@ void RVVType::applyBasicType() {
     ElementBitwidth = 16;
     ScalarType = ScalarTypeKind::BFloat;
     break;
+  case BasicType::F8E4M3:
+    ElementBitwidth = 8;
+    ScalarType = ScalarTypeKind::FloatE4M3;
+    break;
+  case BasicType::F8E5M2:
+    ElementBitwidth = 8;
+    ScalarType = ScalarTypeKind::FloatE5M2;
+    break;
   default:
     llvm_unreachable("Unhandled type code!");
   }
@@ -709,11 +733,17 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
     Scale = LMUL.getScale(ElementBitwidth);
     if (ScalarType == ScalarTypeKind::BFloat)
       ScalarType = ScalarTypeKind::Float;
+    if (ScalarType == ScalarTypeKind::FloatE4M3 ||
+        ScalarType == ScalarTypeKind::FloatE5M2)
+      ScalarType = ScalarTypeKind::BFloat;
     break;
   case VectorTypeModifier::Widening4XVector:
     ElementBitwidth *= 4;
     LMUL.MulLog2LMUL(2);
     Scale = LMUL.getScale(ElementBitwidth);
+    if (ScalarType == ScalarTypeKind::FloatE4M3 ||
+        ScalarType == ScalarTypeKind::FloatE5M2)
+      ScalarType = ScalarTypeKind::Float;
     break;
   case VectorTypeModifier::Widening8XVector:
     ElementBitwidth *= 8;
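
The widening modifiers encode the conversion ladder: an OFP8 element widened 2X becomes BF16 and widened 4X becomes FP32, while BF16 itself still widens to FP32. A minimal sketch of just the kind transitions (bitwidth and LMUL bookkeeping omitted; the enum stands in for ScalarTypeKind):

    #include <cassert>

    enum class Kind { FloatE4M3, FloatE5M2, BFloat, Float };

    // Mirrors only the ScalarType changes in applyModifier above.
    static Kind widen(Kind K, int Factor) {
      const bool IsFP8 = K == Kind::FloatE4M3 || K == Kind::FloatE5M2;
      if (Factor == 2) {
        if (K == Kind::BFloat)
          return Kind::Float;  // pre-existing rule: BF16 -> FP32
        if (IsFP8)
          return Kind::BFloat; // new rule: OFP8 -> BF16
      } else if (Factor == 4 && IsFP8) {
        return Kind::Float;    // new rule: OFP8 -> FP32
      }
      return K;
    }

    int main() {
      assert(widen(Kind::FloatE4M3, 2) == Kind::BFloat);
      assert(widen(Kind::FloatE5M2, 4) == Kind::Float);
      assert(widen(Kind::BFloat, 2) == Kind::Float);
    }
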
@@ -938,13 +968,13 @@ RVVTypeCache::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
 static uint64_t computeRVVTypeHashValue(BasicType BT, int Log2LMUL,
                                         PrototypeDescriptor Proto) {
   // Layout of hash value:
-  // 0               8    16          24        32          40
+  // 0               8    24          32        40          48
   // | Log2LMUL + 3  | BT  | Proto.PT | Proto.TM | Proto.VTM |
   assert(Log2LMUL >= -3 && Log2LMUL <= 3);
-  return (Log2LMUL + 3) | (static_cast<uint64_t>(BT) & 0xff) << 8 |
-         ((uint64_t)(Proto.PT & 0xff) << 16) |
-         ((uint64_t)(Proto.TM & 0xff) << 24) |
-         ((uint64_t)(Proto.VTM & 0xff) << 32);
+  return (Log2LMUL + 3) | (static_cast<uint64_t>(BT) & 0xffff) << 8 |
+         ((uint64_t)(Proto.PT & 0xff) << 24) |
+         ((uint64_t)(Proto.TM & 0xff) << 32) |
+         ((uint64_t)(Proto.VTM & 0xff) << 40);
 }
 
 std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
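
The hash change is forced by the two new BasicType bits: the BT field grows from 8 to 16 bits (mask 0xff to 0xffff) and every later field shifts up by 8. A self-contained sketch of the new layout, with placeholder field values:

    #include <cassert>
    #include <cstdint>

    // New layout: | Log2LMUL+3 (8) | BT (16) | PT (8) | TM (8) | VTM (8) |
    static uint64_t hashValue(int Log2LMUL, uint16_t BT, uint8_t PT,
                              uint8_t TM, uint8_t VTM) {
      assert(Log2LMUL >= -3 && Log2LMUL <= 3);
      return uint64_t(Log2LMUL + 3) | (uint64_t(BT) << 8) |
             (uint64_t(PT) << 24) | (uint64_t(TM) << 32) |
             (uint64_t(VTM) << 40);
    }

    int main() {
      // Basic types that differ only above bit 7 no longer collide.
      assert(hashValue(0, 0x0100, 1, 2, 3) != hashValue(0, 0x0001, 1, 2, 3));
    }
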
@@ -974,13 +1004,16 @@ std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
 //===----------------------------------------------------------------------===//
 // RVVIntrinsic implementation
 //===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(
-    StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
-    StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
-    bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
-    bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
-    const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
-    unsigned NF, Policy NewPolicyAttrs, bool HasFRMRoundModeOp, unsigned TWiden)
+RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
+                           StringRef NewOverloadedName,
+                           StringRef OverloadedSuffix, StringRef IRName,
+                           bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
+                           PolicyScheme Scheme, bool SupportOverloading,
+                           bool HasBuiltinAlias, StringRef ManualCodegen,
+                           const RVVTypes &OutInTypes,
+                           const std::vector<int64_t> &NewIntrinsicTypes,
+                           unsigned NF, Policy NewPolicyAttrs,
+                           bool HasFRMRoundModeOp, unsigned TWiden, bool AltFmt)
     : IRName(IRName), IsMasked(IsMasked),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
       SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
@@ -1000,7 +1033,7 @@ RVVIntrinsic::RVVIntrinsic(
     OverloadedName += "_" + OverloadedSuffix.str();
 
   updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
-                       PolicyAttrs, HasFRMRoundModeOp);
+                       PolicyAttrs, HasFRMRoundModeOp, AltFmt);
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1141,9 +1174,12 @@ RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
                    "and mask policy");
 }
 
-void RVVIntrinsic::updateNamesAndPolicy(
-    bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
-    std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
+void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
+                                        std::string &Name,
+                                        std::string &BuiltinName,
+                                        std::string &OverloadedName,
+                                        Policy &PolicyAttrs,
+                                        bool HasFRMRoundModeOp, bool AltFmt) {
 
   auto appendPolicySuffix = [&](const std::string &suffix) {
     Name += suffix;
@@ -1156,6 +1192,9 @@ void RVVIntrinsic::updateNamesAndPolicy(
     BuiltinName += "_rm";
   }
 
+  if (AltFmt)
+    BuiltinName += "_alt";
+
   if (IsMasked) {
     if (PolicyAttrs.isTUMUPolicy())
       appendPolicySuffix("_tumu");
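
The ordering here matters: the new "_alt" marker is appended after any "_rm" rounding-mode suffix and before the masked/policy suffixes, so a rounding-mode alt-format builtin would hypothetically be named like vfncvt_f_f_w_rm_alt. A sketch of the composition order (policy handling reduced to a single "_m" case):

    #include <cassert>
    #include <string>

    // Suffix composition order taken from updateNamesAndPolicy above; policy
    // handling is simplified to a single "_m" case.
    static std::string builtinName(std::string Name, bool HasFRMRoundModeOp,
                                   bool AltFmt, bool IsMasked) {
      if (HasFRMRoundModeOp)
        Name += "_rm";
      if (AltFmt)
        Name += "_alt";
      if (IsMasked)
        Name += "_m"; // the real code also emits _tu/_tumu/_mu variants
      return Name;
    }

    int main() {
      assert(builtinName("vfncvt_f_f_w", /*HasFRM=*/true, /*AltFmt=*/true,
                         /*IsMasked=*/false) == "vfncvt_f_f_w_rm_alt");
    }
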
@@ -1239,6 +1278,7 @@ raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
   OS << "/*HasTailPolicy=*/" << (int)Record.HasTailPolicy << ", ";
   OS << "/*HasMaskPolicy=*/" << (int)Record.HasMaskPolicy << ", ";
   OS << "/*HasFRMRoundModeOp=*/" << (int)Record.HasFRMRoundModeOp << ", ";
+  OS << "/*AltFmt=*/" << (int)Record.AltFmt << ",";
   OS << "/*IsTuple=*/" << (int)Record.IsTuple << ", ";
   OS << "/*UnMaskedPolicyScheme=*/" << (PolicyScheme)Record.UnMaskedPolicyScheme
      << ", ";
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index c316dfd30d9bf..970132d85d5b6 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -65,6 +65,7 @@ struct SemaRecord {
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
   bool HasFRMRoundModeOp : 1;
+  bool AltFmt : 1;
   bool IsTuple : 1;
   LLVM_PREFERRED_TYPE(PolicyScheme)
   uint8_t UnMaskedPolicyScheme : 2;
@@ -147,6 +148,10 @@ static BasicType ParseBasicType(char c) {
     return BasicType::Float64;
   case 'y':
     return BasicType::BFloat16;
+  case 'a':
+    return BasicType::F8E4M3;
+  case 'b':
+    return BasicType::F8E5M2;
   default:
     return BasicType::Unknown;
   }
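
ParseBasicType is the decoder for the same letters initBuiltinStr emits, so the two switches must stay in sync. A round-trip check for the two new letters (the enum is a stand-in for clang's BasicType):

    #include <cassert>

    enum class BT { F8E4M3, F8E5M2, Unknown };

    // encode() mirrors initBuiltinStr, decode() mirrors ParseBasicType,
    // restricted to the two new OFP8 letters.
    static char encode(BT B) { return B == BT::F8E4M3 ? 'a' : 'b'; }

    static BT decode(char c) {
      switch (c) {
      case 'a': return BT::F8E4M3;
      case 'b': return BT::F8E5M2;
      default:  return BT::Unknown;
      }
    }

    int main() {
      assert(decode(encode(BT::F8E4M3)) == BT::F8E4M3);
      assert(decode(encode(BT::F8E5M2)) == BT::F8E5M2);
    }
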
@@ -641,6 +646,7 @@ void RVVEmitter::createRVVIntrinsics(
     std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
     bool HasTailPolicy = R->getValueAsBit("HasTailPolicy");
     bool HasMaskPolicy = R->getValueAsBit("HasMaskPolicy");
+    bool AltFmt = R->getValueAsBit("AltFmt");
     bool SupportOverloading = R->getValueAsBit("SupportOverloading");
     bool HasBuiltinAlias = R->getValueAsBit("HasBuiltinAlias");
     StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
@@ -701,7 +707,7 @@ void RVVEmitter::createRVVIntrinsics(
             /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
             UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
             ManualCodegen, *Types, IntrinsicTypes, NF, DefaultPolicy,
-            HasFRMRoundModeOp, TWiden));
+            HasFRMRoundModeOp, TWiden, AltFmt));
         if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
           for (auto P : SupportedUnMaskedPolicies) {
             SmallVector<PrototypeDescriptor> PolicyPrototype =
@@ -716,7 +722,7 @@ void RVVEmitter::createRVVIntrinsics(
                 /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
                 UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
                 ManualCodegen, *PolicyTypes, IntrinsicTypes, NF, P,
-                HasFRMRoundModeOp, TWiden));
+                HasFRMRoundModeOp, TWiden, AltFmt));
           }
         if (!HasMasked)
           continue;
@@ -727,7 +733,8 @@ void RVVEmitter::createRVVIntrinsics(
             Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
             /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
             SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
-            IntrinsicTypes, NF, DefaultPolicy, HasFRMRoundModeOp, TWiden));
+            IntrinsicTypes, NF, DefaultPolicy, HasFRMRoundModeOp, TWiden,
+            AltFmt));
         if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
           continue;
         for (auto P : SupportedMaskedPolicies) {
@@ -742,7 +749,7 @@ void RVVEmitter::createRVVIntrinsics(
               MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
               MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
               ManualCodegen, *PolicyTypes, IntrinsicTypes, NF, P,
-              HasFRMRoundModeOp, TWiden));
+              HasFRMRoundModeOp, TWiden, AltFmt));
         }
       } // End for Log2LMULList
     }   // End for TypeRange
@@ -780,6 +787,7 @@ void RVVEmitter::createRVVIntrinsics(
     SR.HasMaskedOffOperand = HasMaskedOffOperand;
     SR.HasTailPolicy = HasTailPolicy;
     SR.HasMaskPolicy = HasMaskPolicy;
+    SR.AltFmt = AltFmt;
     SR.UnMaskedPolicyScheme = static_cast<uint8_t>(UnMaskedPolicyScheme);
     SR.MaskedPolicyScheme = static_cast<uint8_t>(MaskedPolicyScheme);
     SR.Prototype = std::move(BasicPrototype);
@@ -824,6 +832,7 @@ void RVVEmitter::createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
     R.HasMaskedOffOperand = SR.HasMaskedOffOperand;
     R.HasTailPolicy = SR.HasTailPolicy;
     R.HasMaskPolicy = SR.HasMaskPolicy;
+    R.AltFmt = SR.AltFmt;
     R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
     R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
     R.IsTuple = SR.IsTuple;


