[llvm] [llvm][RISCV] Support splat and vp_splat for zvfbfa codegen (PR #167920)

Brandon Wu via llvm-commits llvm-commits@lists.llvm.org
Mon Nov 17 06:20:46 PST 2025


https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/167920

From 67db9063b9e143b65597958a6f4e8df104cdccca Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813@gmail.com>
Date: Thu, 13 Nov 2025 09:40:56 -0800
Subject: [PATCH 1/2] [llvm][RISCV] Support splat and vp_splat for zvfbfa
 codegen

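With Zvfbfa, SPLAT_VECTOR becomes Legal and EXPERIMENTAL_VP_SPLAT is custom-lowered
for bf16 vector types, so the scalar stays in an FPR and the splat selects the
e16alt form directly. A minimal sketch of the resulting codegen (LMUL and register
choices are illustrative; see the regenerated tests below):

    vsetvli a0, zero, e16alt, m1, ta, ma
    vfmv.v.f v8, fa0

rather than moving the scalar through a GPR with fmv.x.h followed by vmv.v.x, as
the Zvfbfmin-only path does.
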
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  18 +-
 .../RISCV/rvv/fixed-vectors-fp-splat-bf16.ll  |  46 +++
 .../RISCV/rvv/fixed-vectors-vp-splat.ll       | 320 ++++++++++++++----
 llvm/test/CodeGen/RISCV/rvv/vp-splat.ll       |  80 +++++
 llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll   |  23 ++
 5 files changed, 406 insertions(+), 81 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 38cce26e44af4..009d278b9bf8b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -88,9 +88,10 @@ static cl::opt<bool>
                       cl::init(true));
 
 // TODO: Support more ops
-static const unsigned ZvfbfaVPOps[] = {ISD::VP_FNEG, ISD::VP_FABS,
-                                       ISD::VP_FCOPYSIGN};
-static const unsigned ZvfbfaOps[] = {ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN};
+static const unsigned ZvfbfaVPOps[] = {
+    ISD::VP_FNEG, ISD::VP_FABS, ISD::VP_FCOPYSIGN, ISD::EXPERIMENTAL_VP_SPLAT};
+static const unsigned ZvfbfaOps[] = {ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN,
+                                     ISD::SPLAT_VECTOR};
 
 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                          const RISCVSubtarget &STI)
@@ -1272,17 +1273,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          VT, Custom);
       setOperationAction(ISD::EXPERIMENTAL_VP_SPLICE, VT, Custom);
       setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
+      setOperationAction(ISD::EXPERIMENTAL_VP_SPLAT, VT, Custom);
 
       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
+      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
       setOperationAction(ZvfbfaVPOps, VT, Custom);
 
-      MVT EltVT = VT.getVectorElementType();
-      if (isTypeLegal(EltVT))
-        setOperationAction({ISD::SPLAT_VECTOR, ISD::EXPERIMENTAL_VP_SPLAT}, VT,
-                           Custom);
-      else
-        setOperationAction({ISD::SPLAT_VECTOR, ISD::EXPERIMENTAL_VP_SPLAT},
-                           EltVT, Custom);
       setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
                           ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD,
                           ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
@@ -4870,7 +4866,7 @@ static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
 
   if (VT.isFloatingPoint()) {
     if ((EltVT == MVT::f16 && !Subtarget.hasStdExtZvfh()) ||
-        EltVT == MVT::bf16) {
+        (EltVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
       if ((EltVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) ||
           (EltVT == MVT::f16 && Subtarget.hasStdExtZfhmin()))
         Scalar = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Scalar);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
index c94cdadc8ca59..82e199b4969db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
@@ -3,6 +3,8 @@
 ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFMIN
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZFBFMIN-ZVFBFMIN
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFA
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFA
 
 define <8 x bfloat> @splat_v8bf16(ptr %x, bfloat %y) {
 ; ZFBFMIN-ZVFBFMIN-LABEL: splat_v8bf16:
@@ -18,6 +20,12 @@ define <8 x bfloat> @splat_v8bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <8 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <8 x bfloat> %a, <8 x bfloat> poison, <8 x i32> zeroinitializer
   ret <8 x bfloat> %b
@@ -37,6 +45,12 @@ define <16 x bfloat> @splat_16bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <16 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <16 x bfloat> %a, <16 x bfloat> poison, <16 x i32> zeroinitializer
   ret <16 x bfloat> %b
@@ -58,6 +72,12 @@ define <64 x bfloat> @splat_64bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_64bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <64 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <64 x bfloat> %a, <64 x bfloat> poison, <64 x i32> zeroinitializer
   ret <64 x bfloat> %b
@@ -75,6 +95,12 @@ define <8 x bfloat> @splat_zero_v8bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.i v8, 0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_zero_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vmv.v.i v8, 0
+; ZVFBFA-NEXT:    ret
   ret <8 x bfloat> splat (bfloat 0.0)
 }
 
@@ -90,6 +116,12 @@ define <16 x bfloat> @splat_zero_16bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.i v8, 0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_zero_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.i v8, 0
+; ZVFBFA-NEXT:    ret
   ret <16 x bfloat> splat (bfloat 0.0)
 }
 
@@ -107,6 +139,13 @@ define <8 x bfloat> @splat_negzero_v8bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_negzero_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    lui a0, 1048568
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    ret
   ret <8 x bfloat> splat (bfloat -0.0)
 }
 
@@ -124,5 +163,12 @@ define <16 x bfloat> @splat_negzero_16bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_negzero_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    lui a0, 1048568
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    ret
   ret <16 x bfloat> splat (bfloat -0.0)
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
index 40e337c811e8b..7901f8c290543 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH_RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH_RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFBFA,ZVFBFA_RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFBFA,ZVFBFA_RV64
 
 define <1 x i8> @vp_splat_v1i8(i8 %val, <1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_splat_v1i8:
@@ -183,97 +185,275 @@ define <16 x i32> @vp_splat_v16i32(i32 %val, <16 x i1> %m, i32 zeroext %evl) {
 }
 
 define <1 x i64> @vp_splat_v1i64(i64 %val, <1 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v1i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v1i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v1i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFH_RV64-LABEL: vp_splat_v1i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v1i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
+;
+; ZVFBFA_RV64-LABEL: vp_splat_v1i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <1 x i64> @llvm.experimental.vp.splat.v1i64(i64 %val, <1 x i1> %m, i32 %evl)
   ret <1 x i64> %splat
 }
 
 define <2 x i64> @vp_splat_v2i64(i64 %val, <2 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v2i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v2i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
+;
+; ZVFH_RV64-LABEL: vp_splat_v2i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v2i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v2i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFBFA_RV64-LABEL: vp_splat_v2i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <2 x i64> @llvm.experimental.vp.splat.v2i64(i64 %val, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %splat
 }
 
 define <4 x i64> @vp_splat_v4i64(i64 %val, <4 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v4i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFH_RV64-LABEL: vp_splat_v4i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v4i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
+;
+; ZVFBFA_RV64-LABEL: vp_splat_v4i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <4 x i64> @llvm.experimental.vp.splat.v4i64(i64 %val, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %splat
 }
 
 define <8 x i64> @vp_splat_v8i64(i64 %val, <8 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v8i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v8i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
+;
+; ZVFH_RV64-LABEL: vp_splat_v8i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v8i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v8i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFBFA_RV64-LABEL: vp_splat_v8i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <8 x i64> @llvm.experimental.vp.splat.v8i64(i64 %val, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %splat
 }
 
+define <1 x bfloat> @vp_splat_v1bf16(bfloat %val, <1 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <1 x bfloat> @llvm.experimental.vp.splat.v1bf16(bfloat %val, <1 x i1> %m, i32 %evl)
+  ret <1 x bfloat> %splat
+}
+
+define <2 x bfloat> @vp_splat_v2bf16(bfloat %val, <2 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <2 x bfloat> @llvm.experimental.vp.splat.v2bf16(bfloat %val, <2 x i1> %m, i32 %evl)
+  ret <2 x bfloat> %splat
+}
+
+define <4 x bfloat> @vp_splat_v4bf16(bfloat %val, <4 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v4bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <4 x bfloat> @llvm.experimental.vp.splat.v4bf16(bfloat %val, <4 x i1> %m, i32 %evl)
+  ret <4 x bfloat> %splat
+}
+
+define <8 x bfloat> @vp_splat_v8bf16(bfloat %val, <8 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v8bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <8 x bfloat> @llvm.experimental.vp.splat.v8bf16(bfloat %val, <8 x i1> %m, i32 %evl)
+  ret <8 x bfloat> %splat
+}
+
+define <16 x bfloat> @vp_splat_v16bf16(bfloat %val, <16 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v16bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <16 x bfloat> @llvm.experimental.vp.splat.v16bf16(bfloat %val, <16 x i1> %m, i32 %evl)
+  ret <16 x bfloat> %splat
+}
+
+define <32 x bfloat> @vp_splat_v32bf16(bfloat %val, <32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v32bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v32bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <32 x bfloat> @llvm.experimental.vp.splat.v32bf16(bfloat %val, <32 x i1> %m, i32 %evl)
+  ret <32 x bfloat> %splat
+}
+
 define <1 x half> @vp_splat_v1f16(half %val, <1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_splat_v1f16:
 ; CHECK:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
index 23fc96cd1b6e0..b8b2ba7c5e5d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splat.ll
@@ -5,6 +5,8 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,NOZFMIN,ZVFHMIN
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZFMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZFMIN
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfhmin,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVFBFA
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfhmin,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVFBFA
 
 define <vscale x 1 x i8> @vp_splat_nxv1i8(i8 %val, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_splat_nxv1i8:
@@ -292,6 +294,12 @@ define <vscale x 1 x bfloat> @vp_splat_nxv1bf16(bfloat %val, <vscale x 1 x i1> %
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 1 x bfloat> @llvm.experimental.vp.splat.nxv1bf16(bfloat %val, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %splat
 }
@@ -310,6 +318,12 @@ define <vscale x 2 x bfloat> @vp_splat_nxv2bf16(bfloat %val, <vscale x 2 x i1> %
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 2 x bfloat> @llvm.experimental.vp.splat.nxv2bf16(bfloat %val, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %splat
 }
@@ -328,6 +342,12 @@ define <vscale x 4 x bfloat> @vp_splat_nxv4bf16(bfloat %val, <vscale x 4 x i1> %
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 4 x bfloat> @llvm.experimental.vp.splat.nxv4bf16(bfloat %val, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %splat
 }
@@ -346,6 +366,12 @@ define <vscale x 8 x bfloat> @vp_splat_nxv8bf16(bfloat %val, <vscale x 8 x i1> %
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 8 x bfloat> @llvm.experimental.vp.splat.nxv8bf16(bfloat %val, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %splat
 }
@@ -364,6 +390,12 @@ define <vscale x 16 x bfloat> @vp_splat_nxv16bf16(bfloat %val, <vscale x 16 x i1
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 16 x bfloat> @llvm.experimental.vp.splat.nxv16bf16(bfloat %val, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %splat
 }
@@ -382,6 +414,12 @@ define <vscale x 32 x bfloat> @vp_splat_nxv32bf16(bfloat %val, <vscale x 32 x i1
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv32bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 32 x bfloat> @llvm.experimental.vp.splat.nxv32bf16(bfloat %val, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %splat
 }
@@ -406,6 +444,13 @@ define <vscale x 1 x half> @vp_splat_nxv1f16(half %val, <vscale x 1 x i1> %m, i3
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv1f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 1 x half> @llvm.experimental.vp.splat.nxv1f16(half %val, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %splat
 }
@@ -430,6 +475,13 @@ define <vscale x 2 x half> @vp_splat_nxv2f16(half %val, <vscale x 2 x i1> %m, i3
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv2f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 2 x half> @llvm.experimental.vp.splat.nxv2f16(half %val, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %splat
 }
@@ -454,6 +506,13 @@ define <vscale x 4 x half> @vp_splat_nxv4f16(half %val, <vscale x 4 x i1> %m, i3
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv4f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 4 x half> @llvm.experimental.vp.splat.nxv4f16(half %val, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %splat
 }
@@ -478,6 +537,13 @@ define <vscale x 8 x half> @vp_splat_nxv8f16(half %val, <vscale x 8 x i1> %m, i3
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv8f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 8 x half> @llvm.experimental.vp.splat.nxv8f16(half %val, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %splat
 }
@@ -502,6 +568,13 @@ define <vscale x 16 x half> @vp_splat_nxv16f16(half %val, <vscale x 16 x i1> %m,
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv16f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 16 x half> @llvm.experimental.vp.splat.nxv16f16(half %val, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %splat
 }
@@ -526,6 +599,13 @@ define <vscale x 32 x half> @vp_splat_nxv32f16(half %val, <vscale x 32 x i1> %m,
 ; ZFMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZFMIN-NEXT:    vmv.v.x v8, a1
 ; ZFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_nxv32f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a1
+; ZVFBFA-NEXT:    ret
   %splat = call <vscale x 32 x half> @llvm.experimental.vp.splat.nxv32f16(half %val, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %splat
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll
index af9881aca03bc..24ed31cc55225 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll
@@ -7,6 +7,10 @@
 ; RUN:   | FileCheck %s --check-prefixes=NOZFBFMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zvfbfmin,+v -target-abi lp64d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck %s --check-prefixes=NOZFBFMIN
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+experimental-zvfbfa,+v -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=ZVFBFA
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+experimental-zvfbfa,+v -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=ZVFBFA
 
 define <vscale x 8 x bfloat> @vsplat_nxv8bf16(bfloat %f) {
 ; CHECK-LABEL: vsplat_nxv8bf16:
@@ -22,6 +26,12 @@ define <vscale x 8 x bfloat> @vsplat_nxv8bf16(bfloat %f) {
 ; NOZFBFMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; NOZFBFMIN-NEXT:    vmv.v.x v8, a0
 ; NOZFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vsplat_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %f, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
   ret <vscale x 8 x bfloat> %splat
@@ -39,6 +49,12 @@ define <vscale x 8 x bfloat> @vsplat_zero_nxv8bf16() {
 ; NOZFBFMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; NOZFBFMIN-NEXT:    vmv.v.i v8, 0
 ; NOZFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vsplat_zero_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.i v8, 0
+; ZVFBFA-NEXT:    ret
   ret <vscale x 8 x bfloat> splat (bfloat zeroinitializer)
 }
 
@@ -56,5 +72,12 @@ define <vscale x 8 x bfloat> @vsplat_negzero_nxv8bf16() {
 ; NOZFBFMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; NOZFBFMIN-NEXT:    vmv.v.x v8, a0
 ; NOZFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vsplat_negzero_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    lui a0, 1048568
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    ret
   ret <vscale x 8 x bfloat> splat (bfloat -0.0)
 }

From f8a236e48564be8ad993e606c797e7c21d6a21b4 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813@gmail.com>
Date: Mon, 17 Nov 2025 06:20:33 -0800
Subject: [PATCH 2/2] fixup! update test

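Regenerate the vfadd tests, which pick up the new splat lowering: the scalar bf16
operand no longer bounces through a GPR, so (registers illustrative)

    fmv.x.h a1, fa0
    vmv.v.x v9, a1

becomes a single

    vfmv.v.f v9, fa0

and the masked nxv32bf16 case needs only half the spill stack (16 * vlenb down to
8 * vlenb).
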
---
 llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll | 25 +++---
 llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll     | 88 ++++++++-------------
 2 files changed, 43 insertions(+), 70 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
index abd00b647e374..c4232915895cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
@@ -617,25 +617,22 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; ZVFBFA-NEXT:    slli a0, a0, 3
 ; ZVFBFA-NEXT:    sub sp, sp, a0
 ; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFBFA-NEXT:    fmv.x.h a0, fa0
-; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
-; ZVFBFA-NEXT:    addi a1, sp, 16
-; ZVFBFA-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v12
-; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m8, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v16, fa0
 ; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v0, v8
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v8
 ; ZVFBFA-NEXT:    addi a0, sp, 16
-; ZVFBFA-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v0, v12
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v20
+; ZVFBFA-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFA-NEXT:    vfadd.vv v0, v8, v0
+; ZVFBFA-NEXT:    vfadd.vv v16, v16, v8
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFA-NEXT:    vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT:    vfadd.vv v16, v0, v24
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
 ; ZVFBFA-NEXT:    vfncvt.f.f.w v12, v16
 ; ZVFBFA-NEXT:    csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 633a201c0131a..1ab2209647c80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -120,9 +120,8 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -165,9 +164,8 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %v
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_commute:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -210,9 +208,8 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -255,9 +252,8 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x b
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -376,9 +372,8 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -421,9 +416,8 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv2bf16_unmasked:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfmv.v.f v9, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -542,9 +536,8 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v12, a1
+; ZVFBFA-NEXT:    vfmv.v.f v12, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v12, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -587,9 +580,8 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv4bf16_unmasked:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v12, a1
+; ZVFBFA-NEXT:    vfmv.v.f v12, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v12
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -708,9 +700,8 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v16, a1
+; ZVFBFA-NEXT:    vfmv.v.f v16, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v16, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -753,9 +744,8 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv8bf16_unmasked:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v16, a1
+; ZVFBFA-NEXT:    vfmv.v.f v16, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v16
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -874,9 +864,8 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v24, a1
+; ZVFBFA-NEXT:    vfmv.v.f v24, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v24, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -919,9 +908,8 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat
 ;
 ; ZVFBFA-LABEL: vfadd_vf_nxv16bf16_unmasked:
 ; ZVFBFA:       # %bb.0:
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vmv.v.x v24, a1
+; ZVFBFA-NEXT:    vfmv.v.f v24, fa0
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v24
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -1369,26 +1357,22 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; ZVFBFA-NEXT:    addi sp, sp, -16
 ; ZVFBFA-NEXT:    .cfi_def_cfa_offset 16
 ; ZVFBFA-NEXT:    csrr a1, vlenb
-; ZVFBFA-NEXT:    slli a1, a1, 4
+; ZVFBFA-NEXT:    slli a1, a1, 3
 ; ZVFBFA-NEXT:    sub sp, sp, a1
-; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m8, ta, ma
 ; ZVFBFA-NEXT:    vmv1r.v v7, v0
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vfmv.v.f v24, fa0
 ; ZVFBFA-NEXT:    csrr a2, vlenb
-; ZVFBFA-NEXT:    vmv.v.x v24, a1
 ; ZVFBFA-NEXT:    slli a1, a2, 1
 ; ZVFBFA-NEXT:    srli a2, a2, 2
 ; ZVFBFA-NEXT:    sub a3, a0, a1
-; ZVFBFA-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT:    vsetvli a4, zero, e8alt, mf2, ta, ma
 ; ZVFBFA-NEXT:    vslidedown.vx v0, v0, a2
 ; ZVFBFA-NEXT:    sltu a2, a0, a3
 ; ZVFBFA-NEXT:    addi a2, a2, -1
 ; ZVFBFA-NEXT:    and a2, a2, a3
-; ZVFBFA-NEXT:    csrr a3, vlenb
-; ZVFBFA-NEXT:    slli a3, a3, 3
-; ZVFBFA-NEXT:    add a3, sp, a3
-; ZVFBFA-NEXT:    addi a3, a3, 16
+; ZVFBFA-NEXT:    addi a3, sp, 16
 ; ZVFBFA-NEXT:    vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
 ; ZVFBFA-NEXT:    vsetvli zero, a2, e16alt, m4, ta, ma
 ; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v28, v0.t
@@ -1402,24 +1386,17 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
 ; ZVFBFA-NEXT:    mv a0, a1
 ; ZVFBFA-NEXT:  .LBB24_2:
 ; ZVFBFA-NEXT:    vmv1r.v v0, v7
+; ZVFBFA-NEXT:    addi a1, sp, 16
+; ZVFBFA-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
-; ZVFBFA-NEXT:    addi a0, sp, 16
-; ZVFBFA-NEXT:    vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; ZVFBFA-NEXT:    csrr a0, vlenb
-; ZVFBFA-NEXT:    slli a0, a0, 3
-; ZVFBFA-NEXT:    add a0, sp, a0
-; ZVFBFA-NEXT:    addi a0, a0, 16
-; ZVFBFA-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v16, v0.t
-; ZVFBFA-NEXT:    addi a0, sp, 16
-; ZVFBFA-NEXT:    vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v24, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFA-NEXT:    vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT:    vfadd.vv v16, v24, v16, v0.t
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
 ; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v16, v0.t
 ; ZVFBFA-NEXT:    csrr a0, vlenb
-; ZVFBFA-NEXT:    slli a0, a0, 4
+; ZVFBFA-NEXT:    slli a0, a0, 3
 ; ZVFBFA-NEXT:    add sp, sp, a0
 ; ZVFBFA-NEXT:    .cfi_def_cfa sp, 16
 ; ZVFBFA-NEXT:    addi sp, sp, 16
@@ -1542,15 +1519,14 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; ZVFBFA-NEXT:    slli a1, a1, 3
 ; ZVFBFA-NEXT:    sub sp, sp, a1
 ; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v16, fa0
 ; ZVFBFA-NEXT:    csrr a2, vlenb
-; ZVFBFA-NEXT:    vsetvli a3, zero, e16, m8, ta, ma
 ; ZVFBFA-NEXT:    vmset.m v24
-; ZVFBFA-NEXT:    vmv.v.x v16, a1
 ; ZVFBFA-NEXT:    slli a1, a2, 1
 ; ZVFBFA-NEXT:    srli a2, a2, 2
 ; ZVFBFA-NEXT:    sub a3, a0, a1
-; ZVFBFA-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT:    vsetvli a4, zero, e8alt, mf2, ta, ma
 ; ZVFBFA-NEXT:    vslidedown.vx v0, v24, a2
 ; ZVFBFA-NEXT:    sltu a2, a0, a3
 ; ZVFBFA-NEXT:    addi a2, a2, -1
@@ -1568,13 +1544,13 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; ZVFBFA-NEXT:  # %bb.1:
 ; ZVFBFA-NEXT:    mv a0, a1
 ; ZVFBFA-NEXT:  .LBB25_2:
+; ZVFBFA-NEXT:    addi a1, sp, 16
+; ZVFBFA-NEXT:    vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
 ; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
-; ZVFBFA-NEXT:    addi a0, sp, 16
-; ZVFBFA-NEXT:    vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v8
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFBFA-NEXT:    vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT:    vfadd.vv v16, v24, v16
 ; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
 ; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFBFA-NEXT:    csrr a0, vlenb
