[llvm] 587d4cc - [RISCV] Lower bf16 {S,U}INT_TO_FP, FP_TO_{S,U}INT and VP variants (#108338)

via llvm-commits (llvm-commits at lists.llvm.org)
Thu Sep 12 22:05:00 PDT 2024


Author: Luke Lau
Date: 2024-09-13T13:04:56+08:00
New Revision: 587d4cc797e09eeb2ec93d8b4ee9cd484626bf8f

URL: https://github.com/llvm/llvm-project/commit/587d4cc797e09eeb2ec93d8b4ee9cd484626bf8f
DIFF: https://github.com/llvm/llvm-project/commit/587d4cc797e09eeb2ec93d8b4ee9cd484626bf8f.diff

LOG: [RISCV] Lower bf16 {S,U}INT_TO_FP, FP_TO_{S,U}INT and VP variants (#108338)

This handles int->fp/fp->int nodes for zvfbfmin, reusing the same
lowering path that f16 uses with zvfhmin.

There's quite a bit of replication here that can probably be cleaned up
at some point.
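
For illustration (a sketch only, not part of the commit message; the IR
below is a hypothetical example rather than a test from the patch, and
%x/%v are placeholder names): zvfbfmin only provides widening/narrowing
conversions between bf16 and f32, so the lowering goes through f32 in
both directions. Conceptually:

  ; int -> bf16: convert to f32 first, then round down to bf16
  %f = sitofp <vscale x 1 x i32> %x to <vscale x 1 x float>
  %r = fptrunc <vscale x 1 x float> %f to <vscale x 1 x bfloat>

  ; bf16 -> int: widen to f32 first, then do the truncating convert
  %w = fpext <vscale x 1 x bfloat> %v to <vscale x 1 x float>
  %i = fptosi <vscale x 1 x float> %w to <vscale x 1 x i32>

In the DAG this means emitting the conversion node at the matching f32
vector type, plus an FP_ROUND back to bf16 for int->fp or a widened f32
source for fp->int, as the diff below shows. nxv32bf16 is split in half
first, since there is no nxv32f32 type to promote into.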

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ccc74ca9965899..6f2dc710cb3d4d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1117,6 +1117,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction({ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
                            Custom);
         setOperationAction(ISD::SELECT_CC, VT, Expand);
+        setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP,
+                            ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP},
+                           VT, Custom);
         setOperationAction({ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR,
                             ISD::EXTRACT_SUBVECTOR, ISD::VECTOR_INTERLEAVE,
                             ISD::VECTOR_DEINTERLEAVE},
@@ -6677,17 +6680,19 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::SINT_TO_FP:
   case ISD::UINT_TO_FP:
     if (Op.getValueType().isVector() &&
-        Op.getValueType().getScalarType() == MVT::f16 &&
-        (Subtarget.hasVInstructionsF16Minimal() &&
-         !Subtarget.hasVInstructionsF16())) {
-      if (Op.getValueType() == MVT::nxv32f16)
+        ((Op.getValueType().getScalarType() == MVT::f16 &&
+          (Subtarget.hasVInstructionsF16Minimal() &&
+           !Subtarget.hasVInstructionsF16())) ||
+         Op.getValueType().getScalarType() == MVT::bf16)) {
+      if (Op.getValueType() == MVT::nxv32f16 ||
+          Op.getValueType() == MVT::nxv32bf16)
         return SplitVectorOp(Op, DAG);
       // int -> f32
       SDLoc DL(Op);
       MVT NVT =
           MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount());
       SDValue NC = DAG.getNode(Op.getOpcode(), DL, NVT, Op->ops());
-      // f32 -> f16
+      // f32 -> [b]f16
       return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NC,
                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
     }
@@ -6696,12 +6701,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::FP_TO_UINT:
     if (SDValue Op1 = Op.getOperand(0);
         Op1.getValueType().isVector() &&
-        Op1.getValueType().getScalarType() == MVT::f16 &&
-        (Subtarget.hasVInstructionsF16Minimal() &&
-         !Subtarget.hasVInstructionsF16())) {
-      if (Op1.getValueType() == MVT::nxv32f16)
+        ((Op1.getValueType().getScalarType() == MVT::f16 &&
+          (Subtarget.hasVInstructionsF16Minimal() &&
+           !Subtarget.hasVInstructionsF16())) ||
+         Op1.getValueType().getScalarType() == MVT::bf16)) {
+      if (Op1.getValueType() == MVT::nxv32f16 ||
+          Op1.getValueType() == MVT::nxv32bf16)
         return SplitVectorOp(Op, DAG);
-      // f16 -> f32
+      // [b]f16 -> f32
       SDLoc DL(Op);
       MVT NVT = MVT::getVectorVT(MVT::f32,
                                  Op1.getValueType().getVectorElementCount());
@@ -7412,17 +7419,19 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::VP_SINT_TO_FP:
   case ISD::VP_UINT_TO_FP:
     if (Op.getValueType().isVector() &&
-        Op.getValueType().getScalarType() == MVT::f16 &&
-        (Subtarget.hasVInstructionsF16Minimal() &&
-         !Subtarget.hasVInstructionsF16())) {
-      if (Op.getValueType() == MVT::nxv32f16)
-        return SplitVPOp(Op, DAG);
+        ((Op.getValueType().getScalarType() == MVT::f16 &&
+          (Subtarget.hasVInstructionsF16Minimal() &&
+           !Subtarget.hasVInstructionsF16())) ||
+         Op.getValueType().getScalarType() == MVT::bf16)) {
+      if (Op.getValueType() == MVT::nxv32f16 ||
+          Op.getValueType() == MVT::nxv32bf16)
+        return SplitVectorOp(Op, DAG);
       // int -> f32
       SDLoc DL(Op);
       MVT NVT =
           MVT::getVectorVT(MVT::f32, Op.getValueType().getVectorElementCount());
       auto NC = DAG.getNode(Op.getOpcode(), DL, NVT, Op->ops());
-      // f32 -> f16
+      // f32 -> [b]f16
       return DAG.getNode(ISD::FP_ROUND, DL, Op.getValueType(), NC,
                          DAG.getIntPtrConstant(0, DL, /*isTarget=*/true));
     }
@@ -7431,12 +7440,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
   case ISD::VP_FP_TO_UINT:
     if (SDValue Op1 = Op.getOperand(0);
         Op1.getValueType().isVector() &&
-        Op1.getValueType().getScalarType() == MVT::f16 &&
-        (Subtarget.hasVInstructionsF16Minimal() &&
-         !Subtarget.hasVInstructionsF16())) {
-      if (Op1.getValueType() == MVT::nxv32f16)
-        return SplitVPOp(Op, DAG);
-      // f16 -> f32
+        ((Op1.getValueType().getScalarType() == MVT::f16 &&
+          (Subtarget.hasVInstructionsF16Minimal() &&
+           !Subtarget.hasVInstructionsF16())) ||
+         Op1.getValueType().getScalarType() == MVT::bf16)) {
+      if (Op1.getValueType() == MVT::nxv32f16 ||
+          Op1.getValueType() == MVT::nxv32bf16)
+        return SplitVectorOp(Op, DAG);
+      // [b]f16 -> f32
       SDLoc DL(Op);
       MVT NVT = MVT::getVectorVT(MVT::f32,
                                  Op1.getValueType().getVectorElementCount());

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
index b888fde7d06836..4edaa3825e5879 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -1,12 +1,734 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+zvfbfmin,+v \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+zvfbfmin \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x i1> @vfptosi_nxv1bf16_nxv1i1(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %evec
+}
+
+define <vscale x 1 x i7> @vfptosi_nxv1bf16_nxv1i7(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i7>
+  ret <vscale x 1 x i7> %evec
+}
+
+define <vscale x 1 x i7> @vfptoui_nxv1bf16_nxv1i7(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i7>
+  ret <vscale x 1 x i7> %evec
+}
+
+define <vscale x 1 x i1> @vfptoui_nxv1bf16_nxv1i1(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %evec
+}
+
+define <vscale x 1 x i8> @vfptosi_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %evec
+}
+
+define <vscale x 1 x i8> @vfptoui_nxv1bf16_nxv1i8(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %evec
+}
+
+define <vscale x 1 x i16> @vfptosi_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i16> @vfptoui_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %evec
+}
+
+define <vscale x 1 x i32> @vfptosi_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i32> @vfptoui_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %evec
+}
+
+define <vscale x 1 x i64> @vfptosi_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 1 x bfloat> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 1 x i64> @vfptoui_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 1 x bfloat> %va to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %evec
+}
+
+define <vscale x 2 x i1> @vfptosi_nxv2bf16_nxv2i1(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv2bf16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 2 x bfloat> %va to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %evec
+}
+
+define <vscale x 2 x i1> @vfptoui_nxv2bf16_nxv2i1(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv2bf16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 2 x bfloat> %va to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %evec
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 2 x bfloat> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %evec
+}
+
+define <vscale x 2 x i8> @vfptoui_nxv2bf16_nxv2i8(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 2 x bfloat> %va to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %evec
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 2 x bfloat> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i16> @vfptoui_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 2 x bfloat> %va to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %evec
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 2 x bfloat> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i32> @vfptoui_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 2 x bfloat> %va to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %evec
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 2 x bfloat> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 2 x i64> @vfptoui_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 2 x bfloat> %va to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %evec
+}
+
+define <vscale x 4 x i1> @vfptosi_nxv4bf16_nxv4i1(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv4bf16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 4 x bfloat> %va to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %evec
+}
+
+define <vscale x 4 x i1> @vfptoui_nxv4bf16_nxv4i1(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv4bf16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 4 x bfloat> %va to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %evec
+}
+
+define <vscale x 4 x i8> @vfptosi_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 4 x bfloat> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %evec
+}
+
+define <vscale x 4 x i8> @vfptoui_nxv4bf16_nxv4i8(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv4bf16_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 4 x bfloat> %va to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %evec
+}
+
+define <vscale x 4 x i16> @vfptosi_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv4bf16_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 4 x bfloat> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i16> @vfptoui_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv4bf16_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 4 x bfloat> %va to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %evec
+}
+
+define <vscale x 4 x i32> @vfptosi_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 4 x bfloat> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i32> @vfptoui_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 4 x bfloat> %va to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %evec
+}
+
+define <vscale x 4 x i64> @vfptosi_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 4 x bfloat> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 4 x i64> @vfptoui_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 4 x bfloat> %va to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %evec
+}
+
+define <vscale x 8 x i1> @vfptosi_nxv8bf16_nxv8i1(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv8bf16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 8 x bfloat> %va to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %evec
+}
+
+define <vscale x 8 x i1> @vfptoui_nxv8bf16_nxv8i1(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv8bf16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 8 x bfloat> %va to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %evec
+}
+
+define <vscale x 8 x i8> @vfptosi_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v12
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 8 x bfloat> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %evec
+}
+
+define <vscale x 8 x i8> @vfptoui_nxv8bf16_nxv8i8(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv8bf16_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v12
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 8 x bfloat> %va to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %evec
+}
+
+define <vscale x 8 x i16> @vfptosi_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv8bf16_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 8 x bfloat> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i16> @vfptoui_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv8bf16_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 8 x bfloat> %va to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %evec
+}
+
+define <vscale x 8 x i32> @vfptosi_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 8 x bfloat> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i32> @vfptoui_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v12
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 8 x bfloat> %va to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %evec
+}
+
+define <vscale x 8 x i64> @vfptosi_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 8 x bfloat> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 8 x i64> @vfptoui_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 8 x bfloat> %va to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %evec
+}
+
+define <vscale x 16 x i1> @vfptosi_nxv16bf16_nxv16i1(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv16bf16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 16 x bfloat> %va to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %evec
+}
+
+define <vscale x 16 x i1> @vfptoui_nxv16bf16_nxv16i1(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv16bf16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 16 x bfloat> %va to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %evec
+}
+
+define <vscale x 16 x i8> @vfptosi_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 16 x bfloat> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %evec
+}
+
+define <vscale x 16 x i8> @vfptoui_nxv16bf16_nxv16i8(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv16bf16_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 16 x bfloat> %va to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %evec
+}
+
+define <vscale x 16 x i16> @vfptosi_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv16bf16_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 16 x bfloat> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i16> @vfptoui_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv16bf16_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 16 x bfloat> %va to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %evec
+}
+
+define <vscale x 16 x i32> @vfptosi_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 16 x bfloat> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 16 x i32> @vfptoui_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 16 x bfloat> %va to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %evec
+}
+
+define <vscale x 32 x i1> @vfptosi_nxv32bf16_nxv32i1(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv32bf16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    add a1, a0, a0
+; CHECK-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; CHECK-NEXT:    vand.vi v12, v12, 1
+; CHECK-NEXT:    vmsne.vi v16, v12, 0
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v24
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 32 x bfloat> %va to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %evec
+}
+
+define <vscale x 32 x i1> @vfptoui_nxv32bf16_nxv32i1(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv32bf16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    add a1, a0, a0
+; CHECK-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; CHECK-NEXT:    vand.vi v12, v12, 1
+; CHECK-NEXT:    vmsne.vi v16, v12, 0
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v24
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 32 x bfloat> %va to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %evec
+}
+
+define <vscale x 32 x i8> @vfptosi_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v24, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v24, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v12, 0
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 32 x bfloat> %va to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %evec
+}
+
+define <vscale x 32 x i8> @vfptoui_nxv32bf16_nxv32i8(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv32bf16_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v24, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v24, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v12, 0
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 32 x bfloat> %va to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %evec
+}
+
+define <vscale x 32 x i16> @vfptosi_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptosi_nxv32bf16_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = fptosi <vscale x 32 x bfloat> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
+
+define <vscale x 32 x i16> @vfptoui_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %va) {
+; CHECK-LABEL: vfptoui_nxv32bf16_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = fptoui <vscale x 32 x bfloat> %va to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %evec
+}
 
 define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
 ; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i1:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
index e5048eaf9d0c23..2e9ceadb96592e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
@@ -1,8 +1,34 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i1_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
+
+define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i1_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
 
 declare <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index f3544589407d8c..f42b603509c22a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -1,8 +1,122 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x i7> @vfptosi_v4i7_v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_v4i7_v4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i7> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i8_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i16_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i32_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
 
 declare <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
@@ -416,10 +530,10 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v20, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB25_2
+; CHECK-NEXT:    bltu a0, a1, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:  .LBB34_2:
 ; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8, v0.t
@@ -450,10 +564,10 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:    bltu a0, a1, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
@@ -473,10 +587,10 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32_unmasked(<vscale x 32 x fl
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v16
-; CHECK-NEXT:    bltu a0, a1, .LBB27_2
+; CHECK-NEXT:    bltu a0, a1, .LBB36_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB36_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
index 4b609d07c1e7ae..2cf158ddbd50d6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
@@ -1,8 +1,34 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i1_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
+
+define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i1_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i1> %v
+}
 
 declare <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 9fd2d8edb2203d..403bc595b9bbd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -1,8 +1,122 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x i7> @vfptoui_v4i7_v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_v4i7_v4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i7> @llvm.vp.fptoui.v4i7.v4bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i7> %v
+}
+
+define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i8_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i16_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i32_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i32_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i64_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vfptoui_nxv2i64_nxv2bf16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x i64> %v
+}
 
 declare <vscale x 2 x i7> @llvm.vp.fptoui.v4i7.v4f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
@@ -416,10 +530,10 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v20, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB25_2
+; CHECK-NEXT:    bltu a0, a1, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB25_2:
+; CHECK-NEXT:  .LBB34_2:
 ; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8, v0.t
@@ -450,10 +564,10 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:    bltu a0, a1, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
@@ -473,10 +587,10 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32_unmasked(<vscale x 32 x fl
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v16, v16
-; CHECK-NEXT:    bltu a0, a1, .LBB27_2
+; CHECK-NEXT:    bltu a0, a1, .LBB36_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB36_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
index 77ef0a340270f5..f5f8ee91c31c47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
@@ -1,12 +1,722 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+zvfbfmin \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+zvfbfmin \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:     --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i1_nxv1bf16(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv1i1_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i1> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i1_nxv1bf16(<vscale x 1 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv1i1_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i1> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2i1_nxv2bf16(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv2i1_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 2 x i1> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2i1_nxv2bf16(<vscale x 2 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv2i1_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 2 x i1> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vsitofp_nxv4i1_nxv4bf16(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv4i1_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 4 x i1> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vuitofp_nxv4i1_nxv4bf16(<vscale x 4 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv4i1_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 4 x i1> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vsitofp_nxv8i1_nxv8bf16(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv8i1_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 8 x i1> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vuitofp_nxv8i1_nxv8bf16(<vscale x 8 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv8i1_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 8 x i1> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vsitofp_nxv16i1_nxv16bf16(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv16i1_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 16 x i1> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vuitofp_nxv16i1_nxv16bf16(<vscale x 16 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv16i1_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 16 x i1> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vsitofp_nxv32i1_nxv32bf16(<vscale x 32 x i1> %va) {
+; CHECK-LABEL: vsitofp_nxv32i1_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    vmerge.vim v8, v12, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v0, a0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vmerge.vim v12, v12, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 32 x i1> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vuitofp_nxv32i1_nxv32bf16(<vscale x 32 x i1> %va) {
+; CHECK-LABEL: vuitofp_nxv32i1_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v0, a0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 32 x i1> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i8_nxv1bf16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i8> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i7_nxv1bf16(<vscale x 1 x i7> %va) {
+; CHECK-LABEL: vsitofp_nxv1i7_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i7> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i7_nxv1bf16(<vscale x 1 x i7> %va) {
+; CHECK-LABEL: vuitofp_nxv1i7_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i7> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i8_nxv1bf16(<vscale x 1 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv1i8_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i8> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2i8_nxv2bf16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 2 x i8> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2i8_nxv2bf16(<vscale x 2 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv2i8_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 2 x i8> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vsitofp_nxv4i8_nxv4bf16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 4 x i8> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vuitofp_nxv4i8_nxv4bf16(<vscale x 4 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv4i8_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 4 x i8> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vsitofp_nxv8i8_nxv8bf16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v12, v10
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 8 x i8> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vuitofp_nxv8i8_nxv8bf16(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v12, v10
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 8 x i8> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vsitofp_nxv16i8_nxv16bf16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 16 x i8> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vuitofp_nxv16i8_nxv16bf16(<vscale x 16 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 16 x i8> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vsitofp_nxv32i8_nxv32bf16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vsitofp_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v24, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v24
+; CHECK-NEXT:    vsext.vf2 v12, v10
+; CHECK-NEXT:    vfwcvt.f.x.v v24, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v20, v24
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 32 x i8> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vuitofp_nxv32i8_nxv32bf16(<vscale x 32 x i8> %va) {
+; CHECK-LABEL: vuitofp_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v24, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v16, v24
+; CHECK-NEXT:    vzext.vf2 v12, v10
+; CHECK-NEXT:    vfwcvt.f.xu.v v24, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v20, v24
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 32 x i8> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i16_nxv1bf16(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv1i16_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i16> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i16_nxv1bf16(<vscale x 1 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv1i16_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i16> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2i16_nxv2bf16(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv2i16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 2 x i16> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2i16_nxv2bf16(<vscale x 2 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv2i16_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 2 x i16> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vsitofp_nxv4i16_nxv4bf16(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv4i16_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 4 x i16> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vuitofp_nxv4i16_nxv4bf16(<vscale x 4 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv4i16_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 4 x i16> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vsitofp_nxv8i16_nxv8bf16(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv8i16_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v12, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 8 x i16> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vuitofp_nxv8i16_nxv8bf16(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv8i16_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v12, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 8 x i16> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vsitofp_nxv16i16_nxv16bf16(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv16i16_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 16 x i16> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vuitofp_nxv16i16_nxv16bf16(<vscale x 16 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv16i16_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 16 x i16> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vsitofp_nxv32i16_nxv32bf16(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vsitofp_nxv32i16_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vfwcvt.f.x.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 32 x i16> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 32 x bfloat> @vuitofp_nxv32i16_nxv32bf16(<vscale x 32 x i16> %va) {
+; CHECK-LABEL: vuitofp_nxv32i16_nxv32bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v8
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vfwcvt.f.xu.v v16, v12
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 32 x i16> %va to <vscale x 32 x bfloat>
+  ret <vscale x 32 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i32_nxv1bf16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv1i32_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i32> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i32_nxv1bf16(<vscale x 1 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv1i32_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i32> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2i32_nxv2bf16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv2i32_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 2 x i32> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2i32_nxv2bf16(<vscale x 2 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv2i32_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 2 x i32> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vsitofp_nxv4i32_nxv4bf16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv4i32_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 4 x i32> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vuitofp_nxv4i32_nxv4bf16(<vscale x 4 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv4i32_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 4 x i32> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vsitofp_nxv8i32_nxv8bf16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv8i32_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 8 x i32> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vuitofp_nxv8i32_nxv8bf16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv8i32_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 8 x i32> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vsitofp_nxv16i32_nxv16bf16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vsitofp_nxv16i32_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 16 x i32> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 16 x bfloat> @vuitofp_nxv16i32_nxv16bf16(<vscale x 16 x i32> %va) {
+; CHECK-LABEL: vuitofp_nxv16i32_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 16 x i32> %va to <vscale x 16 x bfloat>
+  ret <vscale x 16 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vsitofp_nxv1i64_nxv1bf16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv1i64_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 1 x i64> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 1 x bfloat> @vuitofp_nxv1i64_nxv1bf16(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv1i64_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 1 x i64> %va to <vscale x 1 x bfloat>
+  ret <vscale x 1 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2i64_nxv2bf16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv2i64_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 2 x i64> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2i64_nxv2bf16(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv2i64_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 2 x i64> %va to <vscale x 2 x bfloat>
+  ret <vscale x 2 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vsitofp_nxv4i64_nxv4bf16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv4i64_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 4 x i64> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 4 x bfloat> @vuitofp_nxv4i64_nxv4bf16(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv4i64_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 4 x i64> %va to <vscale x 4 x bfloat>
+  ret <vscale x 4 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vsitofp_nxv8i64_nxv8bf16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vsitofp_nxv8i64_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = sitofp <vscale x 8 x i64> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
+
+define <vscale x 8 x bfloat> @vuitofp_nxv8i64_nxv8bf16(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vuitofp_nxv8i64_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    ret
+  %evec = uitofp <vscale x 8 x i64> %va to <vscale x 8 x bfloat>
+  ret <vscale x 8 x bfloat> %evec
+}
 
 define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 ; ZVFH-LABEL: vsitofp_nxv1i1_nxv1f16:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
index 6e09ceefb72920..d1c2cf325bec49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp-mask.ll
@@ -1,6 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i1_unmasked(<vscale x 2 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
+; CHECK-NEXT:    vfcvt.f.x.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
 
 declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index ca44a9a64de4ca..d163988b3d41cd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -1,8 +1,130 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v8
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+declare <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v10, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+declare <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+declare <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.x.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+declare <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.x.w v10, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.sitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
 
 declare <vscale x 2 x half> @llvm.vp.sitofp.nxv2f16.nxv2i7(<vscale x 2 x i7>, <vscale x 2 x i1>, i32)
 
@@ -408,10 +530,10 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFH-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
 ; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfncvt.f.x.w v20, v24, v0.t
-; ZVFH-NEXT:    bltu a0, a1, .LBB25_2
+; ZVFH-NEXT:    bltu a0, a1, .LBB34_2
 ; ZVFH-NEXT:  # %bb.1:
 ; ZVFH-NEXT:    mv a0, a1
-; ZVFH-NEXT:  .LBB25_2:
+; ZVFH-NEXT:  .LBB34_2:
 ; ZVFH-NEXT:    vmv1r.v v0, v7
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfncvt.f.x.w v16, v8, v0.t
@@ -438,10 +560,10 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v24
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB25_2
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB34_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB25_2:
+; ZVFHMIN-NEXT:  .LBB34_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
@@ -470,10 +592,10 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:    bltu a0, a1, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
@@ -493,10 +615,10 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16
-; CHECK-NEXT:    bltu a0, a1, .LBB27_2
+; CHECK-NEXT:    bltu a0, a1, .LBB36_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB36_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll
index cf4bb161ea75b0..5426102efc73e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp-mask.ll
@@ -1,6 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i1_unmasked(<vscale x 2 x i1> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i1_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
 
 declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
 

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index e5941dc7b5ab28..7c96a9e9e10f65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -1,8 +1,122 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 127
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vand.vx v8, v8, a1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8, v0.t
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i8_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i16_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i32_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfcvt.f.xu.v v9, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
+
+define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vuitofp_nxv2bf16_nxv2i64_unmasked:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT:    ret
+  %v = call <vscale x 2 x bfloat> @llvm.vp.uitofp.nxv2bf16.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
+  ret <vscale x 2 x bfloat> %v
+}
 
 declare <vscale x 2 x half> @llvm.vp.uitofp.nxv2f16.nxv2i7(<vscale x 2 x i7>, <vscale x 2 x i1>, i32)
 
@@ -408,10 +522,10 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFH-NEXT:    vl8r.v v24, (a3) # Unknown-size Folded Reload
 ; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfncvt.f.xu.w v20, v24, v0.t
-; ZVFH-NEXT:    bltu a0, a1, .LBB25_2
+; ZVFH-NEXT:    bltu a0, a1, .LBB34_2
 ; ZVFH-NEXT:  # %bb.1:
 ; ZVFH-NEXT:    mv a0, a1
-; ZVFH-NEXT:  .LBB25_2:
+; ZVFH-NEXT:  .LBB34_2:
 ; ZVFH-NEXT:    vmv1r.v v0, v7
 ; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfncvt.f.xu.w v16, v8, v0.t
@@ -438,10 +552,10 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; ZVFHMIN-NEXT:    vfcvt.f.xu.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v20, v24
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB25_2
+; ZVFHMIN-NEXT:    bltu a0, a1, .LBB34_2
 ; ZVFHMIN-NEXT:  # %bb.1:
 ; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB25_2:
+; ZVFHMIN-NEXT:  .LBB34_2:
 ; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
@@ -470,10 +584,10 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v16, v16, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB26_2
+; CHECK-NEXT:    bltu a0, a1, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB26_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8, v0.t
@@ -493,10 +607,10 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32_unmasked(<vscale x 32 x
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v16, v16
-; CHECK-NEXT:    bltu a0, a1, .LBB27_2
+; CHECK-NEXT:    bltu a0, a1, .LBB36_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB36_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
 ; CHECK-NEXT:    ret


        

