[llvm] [RISCV] Add LLVM IR intrinsics vrgatherei16/vslideup/vslidedown support for Zvfbfmin (PR #146312)

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 29 23:11:25 PDT 2025


https://github.com/tclin914 created https://github.com/llvm/llvm-project/pull/146312

This patch adds LLVM IR intrinsic support for vrgatherei16/vslideup/vslidedown with Zvfbfmin.

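For reference, a minimal standalone IR sketch (not part of the patch) of what calling the newly supported bf16 form of vrgatherei16 looks like; the function and value names are illustrative, and iXLen is spelled as i64 assuming an rv64 target, mirroring the riscv64 RUN lines in the tests below:

; Hypothetical example; compile with, e.g.:
;   llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs

declare <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv4bf16(
  <vscale x 4 x bfloat>,  ; passthru (undef when unused)
  <vscale x 4 x bfloat>,  ; source vector
  <vscale x 4 x i16>,     ; 16-bit index vector
  i64)                    ; vector length (iXLen = i64 on rv64)

define <vscale x 4 x bfloat> @gather_bf16(<vscale x 4 x bfloat> %src,
                                          <vscale x 4 x i16> %idx, i64 %vl) nounwind {
  %res = call <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv4bf16(
    <vscale x 4 x bfloat> undef,
    <vscale x 4 x bfloat> %src,
    <vscale x 4 x i16> %idx,
    i64 %vl)
  ret <vscale x 4 x bfloat> %res
}

With the new patterns this selects a vrgatherei16.vv under an e16,m1 vtype, as in the nxv4bf16 test cases in the patch.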
From 9062987d3181b2c006a3c847a6d871eba78a99da Mon Sep 17 00:00:00 2001
From: Jim Lin <jim at andestech.com>
Date: Mon, 30 Jun 2025 13:18:03 +0800
Subject: [PATCH] [RISCV] Add LLVM IR intrinsics
 vrgatherei16/vslideup/vslidedown support for Zvfbfmin

This patch adds LLVM IR intrinsic support for vrgatherei16/vslideup/vslidedown
with Zvfbfmin.
---
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |   5 +
 llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll   | 293 ++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/vslidedown.ll     | 414 +++++++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/vslideup.ll       | 414 +++++++++++++++++-
 4 files changed, 1120 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 9c03c7c83af04..1c9242ddad56f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -7419,6 +7419,9 @@ defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVe
 defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
 defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
 
+defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBFloatVectors, uimm5>;
+defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBFloatVectors, uimm5>;
+
 //===----------------------------------------------------------------------===//
 // 16.4. Vector Register Gather Instructions
 //===----------------------------------------------------------------------===//
@@ -7433,6 +7436,8 @@ defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
                                 AllBFloatVectors, uimm5>;
 defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
                               eew=16, vtilist=AllFloatVectors>;
+defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16",
+                              eew=16, vtilist=AllBFloatVectors>;
 //===----------------------------------------------------------------------===//
 // 16.5. Vector Compress Instruction
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll
index 75c00e406b4f1..168b5eeb5592b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8(
@@ -1449,3 +1449,292 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vrgatherei16_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv1bf16(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i16>,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vrgatherei16_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv2bf16(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i16>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vrgatherei16_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v10, v8, v9
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv4bf16(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i16>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vrgatherei16_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v12, v8, v10
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv8bf16(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i16>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vrgatherei16_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v16, v8, v12
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv16bf16(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i16>,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vrgatherei16_vv_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_vv_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vrgatherei16.vv v24, v8, v16
+; CHECK-NEXT:    vmv.v.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.nxv32bf16(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x i16> %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vrgatherei16_mask_vv_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vrgatherei16.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgatherei16.vv.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i16> %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
index f0f78c211c4a9..8b7a480afed72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
@@ -2463,3 +2463,413 @@ entry:
 
   ret <vscale x 4 x double> %a
 }
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vslidedown.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vslidedown_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vslidedown.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vslidedown_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vslidedown_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vslidedown_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslidedown.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 9,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vslidedown.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vslidedown_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vslidedown.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vslidedown_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vslidedown_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vslidedown_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslidedown.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 9,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vslidedown.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vslidedown_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vslidedown.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vslidedown_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vslidedown_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vslidedown_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslidedown.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen 9,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vslidedown.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vslidedown_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vslidedown.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vslidedown_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vslidedown_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v10, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vslidedown_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslidedown.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen 9,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vslidedown.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vslidedown_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vslidedown.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vslidedown_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vslidedown_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v12, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vslidedown_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslidedown.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll
index 8e3c05611bc71..d4ad50dbd4d53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin \
 ; RUN:   -verify-machineinstrs | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
@@ -2463,3 +2463,413 @@ entry:
 
   ret <vscale x 4 x double> %a
 }
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vslideup.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vslideup_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslideup.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vslideup.mask.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vslideup_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslideup.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vslideup_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslideup.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_vslideup_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vslideup.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 9,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vslideup.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vslideup_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslideup.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vslideup.mask.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vslideup_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslideup.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vslideup_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslideup.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_vslideup_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vslideup.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 9,
+    <vscale x 2 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vslideup.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vslideup_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v9, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslideup.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vslideup.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vslideup_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslideup.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vslideup_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v9, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslideup.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_vslideup_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vslideup.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vslideup.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen 9,
+    <vscale x 4 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vslideup.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vslideup_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslideup.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vslideup.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vslideup_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslideup.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vslideup_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v10, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslideup.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_vslideup_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vslideup.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vslideup.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen 9,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vslideup.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vslideup_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslideup.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2,
+    iXLen %3,
+    iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vslideup.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vslideup_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v12, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslideup.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vslideup_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vslideup.vi v8, v12, 9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslideup.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    iXLen %2,
+    iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_vslideup_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vslideup.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vslideup.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen 9,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}


