[llvm] 6a59dea - [RISCV] Support `vrgather` and `vcompress` for `zvfhmin` and `zvfbfmin` (#101633)

via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 6 03:00:20 PDT 2024


Author: Brandon Wu
Date: 2024-08-06T18:00:16+08:00
New Revision: 6a59deafde742e30daf3bf886f98afc37f00d75b

URL: https://github.com/llvm/llvm-project/commit/6a59deafde742e30daf3bf886f98afc37f00d75b
DIFF: https://github.com/llvm/llvm-project/commit/6a59deafde742e30daf3bf886f98afc37f00d75b.diff

LOG: [RISCV] Support `vrgather` and `vcompress` for `zvfhmin` and `zvfbfmin` (#101633)

Support these in both the C intrinsics and CodeGen so that they can be
used together with the other intrinsics available under `zvfhmin` or
`zvfbfmin`.

This resolves:
https://github.com/riscv-non-isa/rvv-intrinsic-doc/issues/350
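
For illustration only (this sketch is not part of the commit), here is
roughly what the change enables in user code: calling the bf16
`vrgather`/`vcompress` intrinsics with only `zvfbfmin`, no full `zvfh`
required. The helper names are made up, and it assumes a compiler
carrying this patch and a `-march` string that includes `v` and
`zvfbfmin`:

```c
#include <riscv_vector.h>

// Hypothetical helper: reverse the first vl elements of a bf16 vector
// by gathering with the index sequence vl-1, vl-2, ..., 0.
vbfloat16m1_t reverse_bf16m1(vbfloat16m1_t v, size_t vl) {
  vuint16m1_t iota = __riscv_vid_v_u16m1(vl);  // 0, 1, ..., vl-1
  vuint16m1_t idx =
      __riscv_vrsub_vx_u16m1(iota, (uint16_t)(vl - 1), vl);  // vl-1, ..., 0
  return __riscv_vrgather_vv_bf16m1(v, idx, vl);
}

// Hypothetical helper: pack the mask-active bf16 elements to the front
// of the result vector.
vbfloat16m1_t pack_bf16m1(vbfloat16m1_t v, vbool16_t mask, size_t vl) {
  return __riscv_vcompress_vm_bf16m1(v, mask, vl);
}
```

As the `riscv_vector.td` hunk below shows, the `_Float16` variants move
from the `zvfh`-gated "csilxfd" sets to `RequiredFeatures = ["Zvfhmin"]`,
and the new `__bf16` ("y") variants are gated on `Zvfbfmin`.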

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vcompress.ll
    llvm/test/CodeGen/RISCV/rvv/vrgather.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index b838917e39cfe..06ce0affced89 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2262,10 +2262,22 @@ defm vfslide1down : RVVFloatingBinVFBuiltinSet;
 
 // 16.4. Vector Register Gather Instructions
 // signed and floating type
-defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilfd",
                                  [["vv", "v", "vvUv"]]>;
-defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
+defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilfd",
                                  [["vx", "v", "vvz"]]>;
+let RequiredFeatures = ["Zvfhmin"] in {
+  defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "x",
+                                   [["vv", "v", "vvUv"]]>;
+  defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "x",
+                                   [["vx", "v", "vvz"]]>;
+}
+let RequiredFeatures = ["Zvfbfmin"] in {
+  defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "y",
+                                   [["vv", "v", "vvUv"]]>;
+  defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "y",
+                                   [["vx", "v", "vvz"]]>;
+}
 defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
                                      [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
 // unsigned type
@@ -2288,8 +2300,14 @@ let HasMasked = false,
       IntrinsicTypes = {ResultType, Ops.back()->getType()};
     }] in {
   // signed and floating type
-  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
+  defm vcompress : RVVOutBuiltinSet<"vcompress", "csilfd",
                                     [["vm", "v", "vvm"]]>;
+  let RequiredFeatures = ["Zvfhmin"] in
+    defm vcompress : RVVOutBuiltinSet<"vcompress", "x",
+                                      [["vm", "v", "vvm"]]>;
+  let RequiredFeatures = ["Zvfbfmin"] in
+    defm vcompress : RVVOutBuiltinSet<"vcompress", "y",
+                                      [["vm", "v", "vvm"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
                                     [["vm", "Uv", "UvUvm"]]>;

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
index 43ed94b70d0e0..31b7da69cc193 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
   return __riscv_vcompress_vm_u64m8(src, mask, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf4(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf2(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m1(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m2(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m4(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m8(src, mask, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
index 5da76ddfb8004..bb73d62f878d1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vrgather.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde
   return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8_m(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8_m(mask, op1, index, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
index b2729034f9298..bb030ee000c65 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
   return __riscv_vcompress(src, mask, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4(vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2(vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1(vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2(vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4(vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8(vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return __riscv_vcompress(src, mask, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
index 0ea6382a6614b..c6f9cb4c3f335 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vrgather.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -2367,3 +2367,243 @@ vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t inde
   return __riscv_vrgather(mask, op1, index, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4(vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4(vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2(vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2(vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1(vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1(vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2(vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2(vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4(vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4(vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8(vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8(vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather(mask, op1, index, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
index a39e76c2474e3..97440d62f28d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v
   return __riscv_vcompress_vm_u64m8_tu(maskedoff, src, mask, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf4_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16mf2_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m1_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m2_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m4_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return __riscv_vcompress_vm_bf16m8_tu(maskedoff, src, mask, vl);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
index dec79b77e9bfa..3c4316de9e8b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vrgather.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin
   return __riscv_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16mf2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16mf2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m1_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m1_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m2_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m4_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_vv_bf16m8_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_vx_bf16m8_mu(mask, maskedoff, op1, index, vl);
+}
+

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
index 0de71a7f8eb74..e1525e5c67878 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -597,3 +597,63 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, v
   return __riscv_vcompress_tu(maskedoff, src, mask, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vcompress_vm_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vcompress_vm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vcompress_vm_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vcompress_vm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vcompress_vm_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vcompress_vm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vcompress_vm_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vcompress_vm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vcompress_vm_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vcompress_vm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vcompress_vm_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vcompress_vm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return __riscv_vcompress_tu(maskedoff, src, mask, vl);
+}
+

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
index f3ba55c2c31c8..a094e8401b4ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vrgather.c
@@ -1,7 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +zvfh -disable-O0-optnone  \
+// RUN:   -target-feature +zvfhmin -target-feature +zvfbfmin -disable-O0-optnone  \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
@@ -4727,3 +4727,243 @@ vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuin
   return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
 }
 
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_tu(maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vv_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i16> [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vrgather_vx_bf16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vrgather_vx_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vv_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i16> [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vrgather_vx_bf16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vrgather_vx_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vv_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i16> [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vrgather_vx_bf16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vrgather_vx_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vv_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i16> [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vrgather_vx_bf16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vrgather_vx_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vv_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i16> [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vrgather_vx_bf16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vrgather_vx_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vv_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i16> [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vrgather_vx_bf16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], i64 [[INDEX]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vrgather_vx_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t index, size_t vl) {
+  return __riscv_vrgather_mu(mask, maskedoff, op1, index, vl);
+}
+
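
Not part of the patch, a minimal usage sketch of what the clang-side change
enables: the bf16 `vrgather`/`vcompress` intrinsics become available with only
`zvfbfmin`, no `zvfh` needed. The intrinsic names and signatures are taken
verbatim from the tests above; the function name `gather_then_compress` and the
`-march` spelling (e.g. `clang -march=rv64gcv_zvfbfmin`) are illustrative
assumptions, not part of this commit.

#include <stddef.h>
#include <riscv_vector.h>

// Permute a bf16 vector by `index`, then pack the active elements to the
// front of the result. Both operations are pure data movement, so they are
// legal under Zvfbfmin. (Hypothetical example; not from the patch.)
vbfloat16m1_t gather_then_compress(vbfloat16m1_t maskedoff, vbfloat16m1_t v,
                                   vuint16m1_t index, vbool16_t mask,
                                   size_t vl) {
  // Tail-undisturbed (_tu) gather: tail elements come from `maskedoff`.
  vbfloat16m1_t g = __riscv_vrgather_vv_bf16m1_tu(maskedoff, v, index, vl);
  // vcompress moves the elements of `g` whose mask bit is set to the front.
  return __riscv_vcompress_vm_bf16m1_tu(maskedoff, g, mask, vl);
}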

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index f78e404bbafd1..86b30e836473d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -371,6 +371,9 @@ defset list<VTypeInfo> AllVectors = {
   }
 }
 
+defvar AllFloatVectorsExceptFP16 = !filter(vti, AllFloatVectors, !ne(vti.Scalar, f16));
+defvar AllFP16Vectors = !filter(vti, AllFloatVectors, !eq(vti.Scalar, f16));
+
 // This functor is used to obtain the int vector type that has the same SEW and
 // multiplier as the input parameter type
 class GetIntVTypeInfo<VTypeInfo vti> {
@@ -7453,14 +7456,22 @@ defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                               eew=16, vtilist=AllIntegerVectors>;
 
 defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
-                                AllFloatVectors, uimm5>;
+                                AllFloatVectorsExceptFP16, uimm5>;
+let Predicates = [HasVInstructionsF16Minimal] in
+  defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                  AllFP16Vectors, uimm5>;
+defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+                                AllBFloatVectors, uimm5>;
 defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                               eew=16, vtilist=AllFloatVectors>;
 //===----------------------------------------------------------------------===//
 // 16.5. Vector Compress Instruction
 //===----------------------------------------------------------------------===//
 defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllIntegerVectors>;
-defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectorsExceptFP16>;
+let Predicates = [HasVInstructionsF16Minimal] in
+  defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFP16Vectors>;
+defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBFloatVectors>;
 
 // Include the non-intrinsic ISel patterns
 include "RISCVInstrInfoVVLPatterns.td"
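
Not part of the patch, a brief illustration of the codegen-side gating above:
the f16 gather/compress patterns move behind `HasVInstructionsF16Minimal`
(satisfied by `zvfhmin`, and by `zvfh`, which implies it), while the bf16
patterns hang off `AllBFloatVectors`. The sketch below assumes a toolchain
built with this change; `broadcast_lane` is a hypothetical name and the
`-march` spelling is an assumption.

#include <stddef.h>
#include <riscv_vector.h>

// With only Zvfhmin (e.g. -march=rv64gcv_zvfhmin), this now selects a single
// vrgather.vx: gathering with a scalar index i sets every result element to
// v[i], i.e. it broadcasts one lane. (Hypothetical example; not from the
// patch.)
vfloat16m1_t broadcast_lane(vfloat16m1_t v, size_t lane, size_t vl) {
  return __riscv_vrgather_vx_f16m1(v, lane, vl);
}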

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
index 85663f08db6a0..b763e116a9f62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
@@ -817,3 +817,136 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vcompress_vm_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v9, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vcompress.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vcompress_vm_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v9, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vcompress.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vcompress_vm_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v9, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vcompress.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vcompress_vm_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v10, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vcompress.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vcompress_vm_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v12, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vcompress.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vcompress_vm_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vcompress_vm_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
+; CHECK-NEXT:    vcompress.vm v8, v16, v0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vcompress.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3)
+
+  ret <vscale x 32 x bfloat> %a
+}
+

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
index d11e172b25037..5d700e683a969 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
@@ -4820,3 +4820,785 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i16>,
+  iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i16>,
+  iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i16>,
+  iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vv v10, v8, v9
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i16>,
+  iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vrgather.vv v12, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i16>,
+  iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vrgather.vv v16, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i16>,
+  iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrgather.vv v24, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vrgather.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vrgather.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vx v9, v8, a0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vrgather.vx v10, v8, a0
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vrgather.vx v12, v8, a0
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vrgather.vx v16, v8, a0
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vrgather.vi v9, v8, 9
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 9,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vrgather.vi v9, v8, 9
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 9,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v9, v8, 9
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen 9,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v8, 9
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen 9,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vrgather.vi v12, v8, 9
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrgather.vi v16, v8, 9
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen 9,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    iXLen 9,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
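For anyone wanting to exercise the newly enabled builtins from C, here is
a minimal sketch of what user code might look like. The names and types
below (vbfloat16m1_t, __riscv_vrgather_vv_bf16m1,
__riscv_vcompress_vm_bf16m1) are my reading of the rvv-intrinsic-doc
naming scheme for the bf16 variants this patch enables, not something
lifted from the commit itself, so treat it as illustrative rather than
authoritative:

    #include <riscv_vector.h>

    // Gather: result[i] = vs2[vs1[i]] for i < vl.
    // Assumes a compiler with this patch and a -march string that
    // enables V plus zvfbfmin, so the bf16 vector types and the
    // vrgather/vcompress bf16 intrinsics are available.
    vbfloat16m1_t gather_bf16(vbfloat16m1_t vs2, vuint16m1_t vs1,
                              size_t vl) {
      return __riscv_vrgather_vv_bf16m1(vs2, vs1, vl);
    }

    // Compress: pack the elements of vs2 whose mask bit is set into
    // the low positions of the result.
    vbfloat16m1_t compress_bf16(vbfloat16m1_t vs2, vbool16_t vm,
                                size_t vl) {
      return __riscv_vcompress_vm_bf16m1(vs2, vm, vl);
    }

Both should lower to the same vrgather.vv / vcompress.vm instructions the
.ll tests above check, just at SEW=16 with a bf16 element type.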

More information about the llvm-commits mailing list