[llvm] 04e8433 - [RISCV] Add vector bf16 load/store intrinsic tests. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 31 23:30:58 PDT 2024


Author: Craig Topper
Date: 2024-07-31T23:30:44-07:00
New Revision: 04e8433165de66fa8514ef2db53d9f6dd7c244c0

URL: https://github.com/llvm/llvm-project/commit/04e8433165de66fa8514ef2db53d9f6dd7c244c0
DIFF: https://github.com/llvm/llvm-project/commit/04e8433165de66fa8514ef2db53d9f6dd7c244c0.diff

LOG: [RISCV] Add vector bf16 load/store intrinsic tests. NFC

This adds bf16 to the unit stride, strided, and indexed load and
store intrinsic tests. clang already assumes these work with Zvfbfmin.
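For reference, the new tests follow the same pattern as the existing f16
ones, using bfloat element types with bf16-mangled intrinsic names. A
minimal sketch of a unit-stride case is below (illustrative only: the
function name is hypothetical, iXLen is the sed placeholder substituted
by the RUN lines, and the exact test bodies in the committed files may
differ):

    declare <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
      <vscale x 1 x bfloat>,
      ptr,
      iXLen);

    define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1) nounwind {
    entry:
      ; Roughly expected selection with Zvfbfmin:
      ;   vsetvli zero, a1, e16, mf4, ta, ma
      ;   vle16.v v8, (a0)
      %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
        <vscale x 1 x bfloat> undef,
        ptr %0,
        iXLen %1)
      ret <vscale x 1 x bfloat> %a
    }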

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vle.ll
    llvm/test/CodeGen/RISCV/rvv/vleff.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei.ll
    llvm/test/CodeGen/RISCV/rvv/vlse.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei.ll
    llvm/test/CodeGen/RISCV/rvv/vse.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
    llvm/test/CodeGen/RISCV/rvv/vsse.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vle.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll
index a16792235f1ba..7591bb7358e56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
index 7a8ed4153c352..6cbe858e44ea9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
@@ -1636,13 +1636,13 @@ entry:
   ret <vscale x 32 x i16> %b
 }
 
-declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1bf16(
   <vscale x 1 x half>,
   ptr,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1650,7 +1650,7 @@ define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1,
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv1half_nxv1bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1658,7 +1658,7 @@ define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1f16(ptr %0, iXLen %1,
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1f16(
+  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1bf16(
     <vscale x 1 x half> undef,
     ptr %0,
     iXLen %1)
@@ -1668,15 +1668,15 @@ entry:
   ret <vscale x 1 x half> %b
 }
 
-declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
   <vscale x 1 x half>,
   ptr,
   <vscale x 1 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1bf16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1684,7 +1684,7 @@ define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1692,7 +1692,7 @@ define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1f16(<vscale x 1
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
+  %a = call { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
     <vscale x 1 x half> %0,
     ptr %1,
     <vscale x 1 x i1> %2,
@@ -1704,13 +1704,13 @@ entry:
   ret <vscale x 1 x half> %b
 }
 
-declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2bf16(
   <vscale x 2 x half>,
   ptr,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1718,7 +1718,7 @@ define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1,
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv2half_nxv2bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1726,7 +1726,7 @@ define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2f16(ptr %0, iXLen %1,
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2f16(
+  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2bf16(
     <vscale x 2 x half> undef,
     ptr %0,
     iXLen %1)
@@ -1736,15 +1736,15 @@ entry:
   ret <vscale x 2 x half> %b
 }
 
-declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
   <vscale x 2 x half>,
   ptr,
   <vscale x 2 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2bf16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1752,7 +1752,7 @@ define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1760,7 +1760,7 @@ define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2f16(<vscale x 2
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
+  %a = call { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
     <vscale x 2 x half> %0,
     ptr %1,
     <vscale x 2 x i1> %2,
@@ -1772,13 +1772,13 @@ entry:
   ret <vscale x 2 x half> %b
 }
 
-declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4bf16(
   <vscale x 4 x half>,
   ptr,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1786,7 +1786,7 @@ define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1,
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv4half_nxv4bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1794,7 +1794,7 @@ define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4f16(ptr %0, iXLen %1,
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4f16(
+  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4bf16(
     <vscale x 4 x half> undef,
     ptr %0,
     iXLen %1)
@@ -1804,15 +1804,15 @@ entry:
   ret <vscale x 4 x half> %b
 }
 
-declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
   <vscale x 4 x half>,
   ptr,
   <vscale x 4 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4bf16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1820,7 +1820,7 @@ define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1828,7 +1828,7 @@ define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4f16(<vscale x 4
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
+  %a = call { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
     <vscale x 4 x half> %0,
     ptr %1,
     <vscale x 4 x i1> %2,
@@ -1840,13 +1840,13 @@ entry:
   ret <vscale x 4 x half> %b
 }
 
-declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8bf16(
   <vscale x 8 x half>,
   ptr,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1854,7 +1854,7 @@ define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1,
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv8half_nxv8bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1862,7 +1862,7 @@ define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8f16(ptr %0, iXLen %1,
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8f16(
+  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8bf16(
     <vscale x 8 x half> undef,
     ptr %0,
     iXLen %1)
@@ -1872,15 +1872,15 @@ entry:
   ret <vscale x 8 x half> %b
 }
 
-declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
   <vscale x 8 x half>,
   ptr,
   <vscale x 8 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8bf16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1888,7 +1888,7 @@ define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1896,7 +1896,7 @@ define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8f16(<vscale x 8
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
+  %a = call { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
     <vscale x 8 x half> %0,
     ptr %1,
     <vscale x 8 x i1> %2,
@@ -1908,13 +1908,13 @@ entry:
   ret <vscale x 8 x half> %b
 }
 
-declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16bf16(
   <vscale x 16 x half>,
   ptr,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1922,7 +1922,7 @@ define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv16half_nxv16bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1930,7 +1930,7 @@ define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16f16(ptr %0, iXLen
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16f16(
+  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16bf16(
     <vscale x 16 x half> undef,
     ptr %0,
     iXLen %1)
@@ -1940,15 +1940,15 @@ entry:
   ret <vscale x 16 x half> %b
 }
 
-declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
   <vscale x 16 x half>,
   ptr,
   <vscale x 16 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16bf16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1956,7 +1956,7 @@ define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -1964,7 +1964,7 @@ define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16f16(<vscale x
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
+  %a = call { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
     <vscale x 16 x half> %0,
     ptr %1,
     <vscale x 16 x i1> %2,
@@ -1976,13 +1976,13 @@ entry:
   ret <vscale x 16 x half> %b
 }
 
-declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32bf16(
   <vscale x 32 x half>,
   ptr,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
-; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; RV32-NEXT:    vle16ff.v v8, (a0)
@@ -1990,7 +1990,7 @@ define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32f16:
+; RV64-LABEL: intrinsic_vleff_v_nxv32half_nxv32bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; RV64-NEXT:    vle16ff.v v8, (a0)
@@ -1998,7 +1998,7 @@ define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32f16(ptr %0, iXLen
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32f16(
+  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32bf16(
     <vscale x 32 x half> undef,
     ptr %0,
     iXLen %1)
@@ -2008,15 +2008,15 @@ entry:
   ret <vscale x 32 x half> %b
 }
 
-declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
   <vscale x 32 x half>,
   ptr,
   <vscale x 32 x i1>,
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
-; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32bf16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -2024,7 +2024,7 @@ define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x
 ; RV32-NEXT:    sw a0, 0(a2)
 ; RV32-NEXT:    ret
 ;
-; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32f16:
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32bf16:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
@@ -2032,7 +2032,7 @@ define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32f16(<vscale x
 ; RV64-NEXT:    sd a0, 0(a2)
 ; RV64-NEXT:    ret
 entry:
-  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
+  %a = call { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
     <vscale x 32 x half> %0,
     ptr %1,
     <vscale x 32 x i1> %2,
@@ -2044,6 +2044,414 @@ entry:
   ret <vscale x 32 x half> %b
 }
 
+declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vleff_v_nxv1bfloat_nxv1bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.nxv1bf16(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 1 x bfloat> %b
+}
+
+declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 1 x bfloat> %b
+}
+
+declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vleff_v_nxv2bfloat_nxv2bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.nxv2bf16(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 2 x bfloat> %b
+}
+
+declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 2 x bfloat> %b
+}
+
+declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vleff_v_nxv4bfloat_nxv4bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.nxv4bf16(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 4 x bfloat> %b
+}
+
+declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 4 x bfloat> %b
+}
+
+declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vleff_v_nxv8bfloat_nxv8bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.nxv8bf16(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 8 x bfloat> %b
+}
+
+declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 8 x bfloat> %b
+}
+
+declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vleff_v_nxv16bfloat_nxv16bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.nxv16bf16(
+    <vscale x 16 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 16 x bfloat> %b
+}
+
+declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 16 x bfloat> %b
+}
+
+declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vleff_v_nxv32bfloat_nxv32bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+; RV32-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV32-NEXT:    vle16ff.v v8, (a0)
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; RV64-NEXT:    vle16ff.v v8, (a0)
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.nxv32bf16(
+    <vscale x 32 x bfloat> undef,
+    ptr %0,
+    iXLen %1)
+  %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %2
+  ret <vscale x 32 x bfloat> %b
+}
+
+declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+; RV32-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV32-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV32-NEXT:    csrr a0, vl
+; RV32-NEXT:    sw a0, 0(a2)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32bf16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; RV64-NEXT:    vle16ff.v v8, (a0), v0.t
+; RV64-NEXT:    csrr a0, vl
+; RV64-NEXT:    sd a0, 0(a2)
+; RV64-NEXT:    ret
+entry:
+  %a = call { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    ptr %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+  %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
+  %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
+  store iXLen %c, iXLen* %4
+
+  ret <vscale x 32 x bfloat> %b
+}
+
 declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
   <vscale x 1 x i8>,
   ptr,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
index c56ee04fb6f60..0578248c6f72d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh,+f,+d -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 ; The intrinsics are not supported with RV32.
@@ -960,6 +960,198 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
+declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x bfloat> @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x bfloat> @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x bfloat> @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64,
+  i64);
+
+define <vscale x 2 x bfloat> @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x bfloat> @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64,
+  i64);
+
+define <vscale x 4 x bfloat> @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x bfloat> @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64,
+  i64);
+
+define <vscale x 8 x bfloat> @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   ptr,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
index 8f0141526a62b..9126b44caf99f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
@@ -4631,6 +4631,246 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
+declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vloxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vloxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vloxei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vloxei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vloxei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> undef,
+    ptr %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   ptr,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
index 3b191a7f8bb80..7f8f2d61e8b76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
@@ -1414,6 +1414,288 @@ entry:
   ret <vscale x 32 x half> %a
 }
 
+declare <vscale x 1 x bfloat> @llvm.riscv.vlse.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vlse_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vlse.nxv1bf16(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vlse.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vlse.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vlse.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vlse_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vlse.nxv2bf16(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vlse.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vlse.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vlse.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vlse_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vlse.nxv4bf16(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vlse.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vlse.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vlse.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vlse_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vlse.nxv8bf16(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vlse.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vlse.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vlse.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vlse_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vlse.nxv16bf16(
+    <vscale x 16 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vlse.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vlse.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vlse.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vlse_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vlse.nxv32bf16(
+    <vscale x 32 x bfloat> undef,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vlse.mask.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vlse.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
 declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
   <vscale x 1 x i8>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
index 93c821b5357c8..d908bd8b4d950 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh,+f,+d -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 ; The intrinsics are not supported with RV32.
@@ -960,6 +960,198 @@ entry:
   ret <vscale x 8 x half> %a
 }
 
+declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x bfloat> @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxei64.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64,
+  i64);
+
+define <vscale x 1 x bfloat> @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x bfloat> @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxei64.v v10, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64,
+  i64);
+
+define <vscale x 2 x bfloat> @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x bfloat> @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxei64.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64,
+  i64);
+
+define <vscale x 4 x bfloat> @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x bfloat> @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxei64.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    <vscale x 8 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64,
+  i64);
+
+define <vscale x 8 x bfloat> @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4, i64 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
index 679726fe66695..565ede979d12c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
@@ -1151,6 +1151,246 @@ entry:
   ret <vscale x 16 x half> %a
 }
 
+declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> undef,
+    ptr %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vluxei32.v v9, (a0), v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> undef,
+    ptr %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vluxei32.v v10, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> undef,
+    ptr %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vluxei32.v v12, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> undef,
+    ptr %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vluxei32.v v16, (a0), v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> undef,
+    ptr %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll
index 556b7702649db..e20d91aa03a81 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh,+zvfbfmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh,+zvfbfmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare void @llvm.riscv.vse.nxv1i64(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
index 168d71dab92d9..5497913fc3723 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh,+f -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 ; The intrinsics are not supported with RV32.
@@ -924,6 +924,190 @@ entry:
   ret void
 }
 
+declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
index bcb00242741cb..568b34f4e32be 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
@@ -4466,6 +4466,236 @@ entry:
   ret void
 }
 
+declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
 declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
index 9b627bcd66467..b2b8334e7a604 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare void @llvm.riscv.vsse.nxv1i64(
@@ -1384,6 +1384,282 @@ entry:
   ret void
 }
 
+declare void @llvm.riscv.vsse.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
 declare void @llvm.riscv.vsse.nxv1i8(
   <vscale x 1 x i8>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
index dbc7e719f14a8..3572f5909400a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh,+f -verify-machineinstrs \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
 
 ; The intrinsics are not supported with RV32.
@@ -924,6 +924,190 @@ entry:
   ret void
 }
 
+declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i64(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i64(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i64(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i64(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i64(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i64(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i64(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i64(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
   <vscale x 1 x float>,
   ptr,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
index 7413177918e63..dc6dbe768741a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
@@ -4466,6 +4466,236 @@ entry:
   ret void
 }
 
+declare void @llvm.riscv.vsuxei.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i32(
+  <vscale x 1 x bfloat>,
+  ptr,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1bf16.nxv1i32(
+    <vscale x 1 x bfloat> %0,
+    ptr %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i32(
+  <vscale x 2 x bfloat>,
+  ptr,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2bf16.nxv2i32(
+    <vscale x 2 x bfloat> %0,
+    ptr %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i32(
+  <vscale x 4 x bfloat>,
+  ptr,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4bf16.nxv4i32(
+    <vscale x 4 x bfloat> %0,
+    ptr %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i32(
+  <vscale x 8 x bfloat>,
+  ptr,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8bf16.nxv8i32(
+    <vscale x 8 x bfloat> %0,
+    ptr %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16bf16.nxv16i32(
+  <vscale x 16 x bfloat>,
+  ptr,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv16bf16.nxv16i32(
+    <vscale x 16 x bfloat> %0,
+    ptr %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
 declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   ptr,
